[llvm] 995835f - [SelectionDAG] Add support for the 3-way comparison intrinsics [US]CMP (#91871)

via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 17 02:16:57 PDT 2024


Author: Poseydon42
Date: 2024-06-17T11:16:52+02:00
New Revision: 995835fe6d4dd7467d8b0b1dbe6a3d9547d900c8

URL: https://github.com/llvm/llvm-project/commit/995835fe6d4dd7467d8b0b1dbe6a3d9547d900c8
DIFF: https://github.com/llvm/llvm-project/commit/995835fe6d4dd7467d8b0b1dbe6a3d9547d900c8.diff

LOG: [SelectionDAG] Add support for the 3-way comparison intrinsics [US]CMP (#91871)

This PR adds initial support for the `scmp`/`ucmp` 3-way comparison
intrinsics in the SelectionDAG. Some of the expansions/lowerings
are not optimal yet.
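
For reference, both intrinsics return -1, 0, or 1 depending on whether the
first argument is less than, equal to, or greater than the second, and the
result type may differ from the operand type. A minimal IR sketch (types
chosen for illustration):

    %a = call i8 @llvm.scmp(i32 %x, i32 %y)   ; signed 3-way compare
    %b = call i32 @llvm.ucmp(i64 %v, i64 %w)  ; unsigned 3-way compare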

Added: 
    llvm/test/CodeGen/AArch64/scmp.ll
    llvm/test/CodeGen/AArch64/ucmp.ll
    llvm/test/CodeGen/X86/scmp.ll
    llvm/test/CodeGen/X86/ucmp.ll

Modified: 
    llvm/include/llvm/CodeGen/ISDOpcodes.h
    llvm/include/llvm/CodeGen/TargetLowering.h
    llvm/include/llvm/Target/TargetSelectionDAG.td
    llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/lib/CodeGen/TargetLoweringBase.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index c8c86ed5eef29..6bb89fb58a296 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -678,6 +678,12 @@ enum NodeType {
   UMIN,
   UMAX,
 
+  /// [US]CMP - 3-way comparison of signed or unsigned integers. Returns -1, 0,
+  /// or 1 depending on whether Op0 <, ==, or > Op1. The operands can have type
+  /// different to the result.
+  SCMP,
+  UCMP,
+
   /// Bitwise operators - logical and, logical or, logical xor.
   AND,
   OR,

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 3074ece787a08..06f7ee2a589c8 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -5427,6 +5427,10 @@ class TargetLowering : public TargetLoweringBase {
   /// method accepts integers as its arguments.
   SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
 
+  /// Method for building the DAG expansion of ISD::[US]CMP. This
+  /// method accepts integers as its arguments.
+  SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const;
+
   /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
   /// method accepts integers as its arguments.
   SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;

diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 4ea02e6aa7f00..8cbf98cd58ca9 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -434,6 +434,11 @@ def umin       : SDNode<"ISD::UMIN"      , SDTIntBinOp,
 def umax       : SDNode<"ISD::UMAX"      , SDTIntBinOp,
                                   [SDNPCommutative, SDNPAssociative]>;
 
+def scmp       : SDNode<"ISD::SCMP"      , SDTIntBinOp,
+                                  []>;
+def ucmp       : SDNode<"ISD::UCMP"      , SDTIntBinOp,
+                                  []>;
+
 def saddsat    : SDNode<"ISD::SADDSAT"   , SDTIntBinOp, [SDNPCommutative]>;
 def uaddsat    : SDNode<"ISD::UADDSAT"   , SDTIntBinOp, [SDNPCommutative]>;
 def ssubsat    : SDNode<"ISD::SSUBSAT"   , SDTIntBinOp>;

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 2c7148d3a7033..dfc24f01eb112 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1153,6 +1153,8 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
   case ISD::USUBSAT:
   case ISD::SSHLSAT:
   case ISD::USHLSAT:
+  case ISD::SCMP:
+  case ISD::UCMP:
   case ISD::FP_TO_SINT_SAT:
   case ISD::FP_TO_UINT_SAT:
     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
@@ -3885,6 +3887,10 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
   case ISD::USUBSAT:
     Results.push_back(TLI.expandAddSubSat(Node, DAG));
     break;
+  case ISD::SCMP:
+  case ISD::UCMP:
+    Results.push_back(TLI.expandCMP(Node, DAG));
+    break;
   case ISD::SSHLSAT:
   case ISD::USHLSAT:
     Results.push_back(TLI.expandShlSat(Node, DAG));

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index f435a363051a9..a058b509b3aca 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -236,6 +236,11 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
     Res = PromoteIntRes_ADDSUBSHLSAT<VPMatchContext>(N);
     break;
 
+  case ISD::SCMP:
+  case ISD::UCMP:
+    Res = PromoteIntRes_CMP(N);
+    break;
+
   case ISD::SMULFIX:
   case ISD::SMULFIXSAT:
   case ISD::UMULFIX:
@@ -1261,6 +1266,13 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SADDSUBO(SDNode *N, unsigned ResNo) {
   return Res;
 }
 
+SDValue DAGTypeLegalizer::PromoteIntRes_CMP(SDNode *N) {
+  EVT PromotedResultTy =
+      TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+  return DAG.getNode(N->getOpcode(), SDLoc(N), PromotedResultTy,
+                     N->getOperand(0), N->getOperand(1));
+}
+
 SDValue DAGTypeLegalizer::PromoteIntRes_Select(SDNode *N) {
   SDValue Mask = N->getOperand(0);
 
@@ -1923,6 +1935,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
   case ISD::ROTL:
   case ISD::ROTR: Res = PromoteIntOp_Shift(N); break;
 
+  case ISD::SCMP:
+  case ISD::UCMP: Res = PromoteIntOp_CMP(N); break;
+
   case ISD::FSHL:
   case ISD::FSHR: Res = PromoteIntOp_FunnelShift(N); break;
 
@@ -2233,6 +2248,17 @@ SDValue DAGTypeLegalizer::PromoteIntOp_Shift(SDNode *N) {
                                 ZExtPromotedInteger(N->getOperand(1))), 0);
 }
 
+SDValue DAGTypeLegalizer::PromoteIntOp_CMP(SDNode *N) {
+  SDValue LHS = N->getOpcode() == ISD::UCMP
+                    ? ZExtPromotedInteger(N->getOperand(0))
+                    : SExtPromotedInteger(N->getOperand(0));
+  SDValue RHS = N->getOpcode() == ISD::UCMP
+                    ? ZExtPromotedInteger(N->getOperand(1))
+                    : SExtPromotedInteger(N->getOperand(1));
+
+  return SDValue(DAG.UpdateNodeOperands(N, LHS, RHS), 0);
+}
+
 SDValue DAGTypeLegalizer::PromoteIntOp_FunnelShift(SDNode *N) {
   return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), N->getOperand(1),
                                 ZExtPromotedInteger(N->getOperand(2))), 0);
@@ -2788,6 +2814,9 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::UMIN:
   case ISD::SMIN: ExpandIntRes_MINMAX(N, Lo, Hi); break;
 
+  case ISD::SCMP:
+  case ISD::UCMP: ExpandIntRes_CMP(N, Lo, Hi); break;
+
   case ISD::ADD:
   case ISD::SUB: ExpandIntRes_ADDSUB(N, Lo, Hi); break;
 
@@ -3285,6 +3314,11 @@ void DAGTypeLegalizer::ExpandIntRes_MINMAX(SDNode *N,
   SplitInteger(Result, Lo, Hi);
 }
 
+void DAGTypeLegalizer::ExpandIntRes_CMP(SDNode *N, SDValue &Lo, SDValue &Hi) {
+  SDValue ExpandedCMP = TLI.expandCMP(N, DAG);
+  SplitInteger(ExpandedCMP, Lo, Hi);
+}
+
 void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
                                            SDValue &Lo, SDValue &Hi) {
   SDLoc dl(N);
@@ -5194,6 +5228,9 @@ bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
   case ISD::RETURNADDR:
   case ISD::FRAMEADDR:         Res = ExpandIntOp_RETURNADDR(N); break;
 
+  case ISD::SCMP:
+  case ISD::UCMP:              Res = ExpandIntOp_CMP(N); break;
+
   case ISD::ATOMIC_STORE:      Res = ExpandIntOp_ATOMIC_STORE(N); break;
   case ISD::STACKMAP:
     Res = ExpandIntOp_STACKMAP(N, OpNo);
@@ -5455,6 +5492,10 @@ SDValue DAGTypeLegalizer::ExpandIntOp_Shift(SDNode *N) {
   return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Lo), 0);
 }
 
+SDValue DAGTypeLegalizer::ExpandIntOp_CMP(SDNode *N) {
+  return TLI.expandCMP(N, DAG);
+}
+
 SDValue DAGTypeLegalizer::ExpandIntOp_RETURNADDR(SDNode *N) {
  // The argument of RETURNADDR / FRAMEADDR builtin is 32 bit constant.  This
   // surely makes pretty nice problems on 8/16 bit targets. Just truncate this

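Conceptually, the integer-operand promotion added here rewrites a compare of
illegal-typed operands as the same compare on extended operands: sign
extension preserves signed order and zero extension preserves unsigned
order, so the 3-way result is unchanged. A sketch in IR terms, assuming i8
promotes to i32:

    %xw = sext i8 %x to i32                    ; zext for ucmp
    %yw = sext i8 %y to i32
    %r  = call i8 @llvm.scmp(i32 %xw, i32 %yw)
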
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 82c39f46137da..85f947efe2c75 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -343,6 +343,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntRes_Overflow(SDNode *N);
   SDValue PromoteIntRes_FFREXP(SDNode *N);
   SDValue PromoteIntRes_SADDSUBO(SDNode *N, unsigned ResNo);
+  SDValue PromoteIntRes_CMP(SDNode *N);
   SDValue PromoteIntRes_Select(SDNode *N);
   SDValue PromoteIntRes_SELECT_CC(SDNode *N);
   SDValue PromoteIntRes_SETCC(SDNode *N);
@@ -394,6 +395,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo);
   SDValue PromoteIntOp_SETCC(SDNode *N, unsigned OpNo);
   SDValue PromoteIntOp_Shift(SDNode *N);
+  SDValue PromoteIntOp_CMP(SDNode *N);
   SDValue PromoteIntOp_FunnelShift(SDNode *N);
   SDValue PromoteIntOp_SIGN_EXTEND(SDNode *N);
   SDValue PromoteIntOp_VP_SIGN_EXTEND(SDNode *N);
@@ -476,6 +478,8 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
 
   void ExpandIntRes_MINMAX            (SDNode *N, SDValue &Lo, SDValue &Hi);
 
+  void ExpandIntRes_CMP               (SDNode *N, SDValue &Lo, SDValue &Hi);
+
   void ExpandIntRes_SADDSUBO          (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_UADDSUBO          (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_XMULO             (SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -505,6 +509,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue ExpandIntOp_SETCC(SDNode *N);
   SDValue ExpandIntOp_SETCCCARRY(SDNode *N);
   SDValue ExpandIntOp_Shift(SDNode *N);
+  SDValue ExpandIntOp_CMP(SDNode *N);
   SDValue ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo);
   SDValue ExpandIntOp_TRUNCATE(SDNode *N);
   SDValue ExpandIntOp_XINT_TO_FP(SDNode *N);
@@ -805,6 +810,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   void ScalarizeVectorResult(SDNode *N, unsigned ResNo);
   SDValue ScalarizeVecRes_MERGE_VALUES(SDNode *N, unsigned ResNo);
   SDValue ScalarizeVecRes_BinOp(SDNode *N);
+  SDValue ScalarizeVecRes_CMP(SDNode *N);
   SDValue ScalarizeVecRes_TernaryOp(SDNode *N);
   SDValue ScalarizeVecRes_UnaryOp(SDNode *N);
   SDValue ScalarizeVecRes_StrictFPOp(SDNode *N);
@@ -849,6 +855,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue ScalarizeVecOp_STRICT_FP_EXTEND(SDNode *N);
   SDValue ScalarizeVecOp_VECREDUCE(SDNode *N);
   SDValue ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N);
+  SDValue ScalarizeVecOp_CMP(SDNode *N);
 
   //===--------------------------------------------------------------------===//
   // Vector Splitting Support: LegalizeVectorTypes.cpp
@@ -879,6 +886,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   void SplitVectorResult(SDNode *N, unsigned ResNo);
   void SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
+  void SplitVecRes_CMP(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_ADDRSPACECAST(SDNode *N, SDValue &Lo, SDValue &Hi);
   void SplitVecRes_FFREXP(SDNode *N, unsigned ResNo, SDValue &Lo, SDValue &Hi);
@@ -943,6 +951,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue SplitVecOp_VSETCC(SDNode *N);
   SDValue SplitVecOp_FP_ROUND(SDNode *N);
   SDValue SplitVecOp_FPOpDifferentTypes(SDNode *N);
+  SDValue SplitVecOp_CMP(SDNode *N);
   SDValue SplitVecOp_FP_TO_XINT_SAT(SDNode *N);
   SDValue SplitVecOp_VP_CttzElements(SDNode *N);
 
@@ -1011,6 +1020,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
 
   SDValue WidenVecRes_Ternary(SDNode *N);
   SDValue WidenVecRes_Binary(SDNode *N);
+  SDValue WidenVecRes_CMP(SDNode *N);
   SDValue WidenVecRes_BinaryCanTrap(SDNode *N);
   SDValue WidenVecRes_BinaryWithExtraScalarOp(SDNode *N);
   SDValue WidenVecRes_StrictFP(SDNode *N);
@@ -1030,6 +1040,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue WidenVecOp_BITCAST(SDNode *N);
   SDValue WidenVecOp_CONCAT_VECTORS(SDNode *N);
   SDValue WidenVecOp_EXTEND(SDNode *N);
+  SDValue WidenVecOp_CMP(SDNode *N);
   SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
   SDValue WidenVecOp_INSERT_SUBVECTOR(SDNode *N);
   SDValue WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N);

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 340d50dce183a..14b147cc5b01b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -449,6 +449,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
   case ISD::FP_TO_SINT_SAT:
   case ISD::FP_TO_UINT_SAT:
   case ISD::MGATHER:
+  case ISD::SCMP:
+  case ISD::UCMP:
     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
     break;
   case ISD::SMULFIX:

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 52ef6209bc5fb..532c6306fb3d1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -173,6 +173,12 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::ROTR:
     R = ScalarizeVecRes_BinOp(N);
     break;
+
+  case ISD::SCMP:
+  case ISD::UCMP:
+    R = ScalarizeVecRes_CMP(N);
+    break;
+
   case ISD::FMA:
   case ISD::FSHL:
   case ISD::FSHR:
@@ -222,6 +228,27 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) {
                      LHS.getValueType(), LHS, RHS, N->getFlags());
 }
 
+SDValue DAGTypeLegalizer::ScalarizeVecRes_CMP(SDNode *N) {
+  SDLoc DL(N);
+
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+  if (getTypeAction(LHS.getValueType()) ==
+      TargetLowering::TypeScalarizeVector) {
+    LHS = GetScalarizedVector(LHS);
+    RHS = GetScalarizedVector(RHS);
+  } else {
+    EVT VT = LHS.getValueType().getVectorElementType();
+    LHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, LHS,
+                      DAG.getVectorIdxConstant(0, DL));
+    RHS = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, RHS,
+                      DAG.getVectorIdxConstant(0, DL));
+  }
+
+  return DAG.getNode(N->getOpcode(), SDLoc(N),
+                     N->getValueType(0).getVectorElementType(), LHS, RHS);
+}
+
 SDValue DAGTypeLegalizer::ScalarizeVecRes_TernaryOp(SDNode *N) {
   SDValue Op0 = GetScalarizedVector(N->getOperand(0));
   SDValue Op1 = GetScalarizedVector(N->getOperand(1));
@@ -775,6 +802,10 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
   case ISD::VECREDUCE_SEQ_FMUL:
     Res = ScalarizeVecOp_VECREDUCE_SEQ(N);
     break;
+  case ISD::SCMP:
+  case ISD::UCMP:
+    Res = ScalarizeVecOp_CMP(N);
+    break;
   }
 
   // If the result is null, the sub-method took care of registering results etc.
@@ -995,6 +1026,12 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N) {
                      AccOp, Op, N->getFlags());
 }
 
+SDValue DAGTypeLegalizer::ScalarizeVecOp_CMP(SDNode *N) {
+  SDValue LHS = GetScalarizedVector(N->getOperand(0));
+  SDValue RHS = GetScalarizedVector(N->getOperand(1));
+  return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), LHS, RHS);
+}
+
 //===----------------------------------------------------------------------===//
 //  Result Vector Splitting
 //===----------------------------------------------------------------------===//
@@ -1230,6 +1267,10 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
     SplitVecRes_TernaryOp(N, Lo, Hi);
     break;
 
+  case ISD::SCMP: case ISD::UCMP:
+    SplitVecRes_CMP(N, Lo, Hi);
+    break;
+
 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
   case ISD::STRICT_##DAGN:
 #include "llvm/IR/ConstrainedOps.def"
@@ -1373,6 +1414,27 @@ void DAGTypeLegalizer::SplitVecRes_TernaryOp(SDNode *N, SDValue &Lo,
                    {Op0Hi, Op1Hi, Op2Hi, MaskHi, EVLHi}, Flags);
 }
 
+void DAGTypeLegalizer::SplitVecRes_CMP(SDNode *N, SDValue &Lo, SDValue &Hi) {
+  LLVMContext &Ctxt = *DAG.getContext();
+  SDLoc dl(N);
+
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  SDValue LHSLo, LHSHi, RHSLo, RHSHi;
+  if (getTypeAction(LHS.getValueType()) == TargetLowering::TypeSplitVector) {
+    GetSplitVector(LHS, LHSLo, LHSHi);
+    GetSplitVector(RHS, RHSLo, RHSHi);
+  } else {
+    std::tie(LHSLo, LHSHi) = DAG.SplitVector(LHS, dl);
+    std::tie(RHSLo, RHSHi) = DAG.SplitVector(RHS, dl);
+  }
+
+  EVT SplitResVT = N->getValueType(0).getHalfNumVectorElementsVT(Ctxt);
+  Lo = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSLo, RHSLo);
+  Hi = DAG.getNode(N->getOpcode(), dl, SplitResVT, LHSHi, RHSHi);
+}
+
 void DAGTypeLegalizer::SplitVecRes_FIX(SDNode *N, SDValue &Lo, SDValue &Hi) {
   SDValue LHSLo, LHSHi;
   GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
@@ -3103,6 +3165,11 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
     Res = SplitVecOp_FPOpDifferentTypes(N);
     break;
 
+  case ISD::SCMP:
+  case ISD::UCMP:
+    Res = SplitVecOp_CMP(N);
+    break;
+
   case ISD::ANY_EXTEND_VECTOR_INREG:
   case ISD::SIGN_EXTEND_VECTOR_INREG:
   case ISD::ZERO_EXTEND_VECTOR_INREG:
@@ -4104,6 +4171,25 @@ SDValue DAGTypeLegalizer::SplitVecOp_FPOpDifferentTypes(SDNode *N) {
   return DAG.getNode(ISD::CONCAT_VECTORS, DL, N->getValueType(0), Lo, Hi);
 }
 
+SDValue DAGTypeLegalizer::SplitVecOp_CMP(SDNode *N) {
+  LLVMContext &Ctxt = *DAG.getContext();
+  SDLoc dl(N);
+
+  SDValue LHSLo, LHSHi, RHSLo, RHSHi;
+  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
+  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
+
+  EVT ResVT = N->getValueType(0);
+  ElementCount SplitOpEC = LHSLo.getValueType().getVectorElementCount();
+  EVT NewResVT =
+      EVT::getVectorVT(Ctxt, ResVT.getVectorElementType(), SplitOpEC);
+
+  SDValue Lo = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSLo, RHSLo);
+  SDValue Hi = DAG.getNode(N->getOpcode(), dl, NewResVT, LHSHi, RHSHi);
+
+  return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
+}
+
 SDValue DAGTypeLegalizer::SplitVecOp_FP_TO_XINT_SAT(SDNode *N) {
   EVT ResVT = N->getValueType(0);
   SDValue Lo, Hi;
@@ -4288,6 +4374,11 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
     Res = WidenVecRes_Binary(N);
     break;
 
+  case ISD::SCMP:
+  case ISD::UCMP:
+    Res = WidenVecRes_CMP(N);
+    break;
+
   case ISD::FPOW:
   case ISD::FREM:
     if (unrollExpandedOp())
@@ -4495,6 +4586,28 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
                      {InOp1, InOp2, Mask, N->getOperand(3)}, N->getFlags());
 }
 
+SDValue DAGTypeLegalizer::WidenVecRes_CMP(SDNode *N) {
+  LLVMContext &Ctxt = *DAG.getContext();
+  SDLoc dl(N);
+
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+  EVT OpVT = LHS.getValueType();
+  if (getTypeAction(OpVT) == TargetLowering::TypeWidenVector) {
+    LHS = GetWidenedVector(LHS);
+    RHS = GetWidenedVector(RHS);
+    OpVT = LHS.getValueType();
+  }
+
+  EVT WidenResVT = TLI.getTypeToTransformTo(Ctxt, N->getValueType(0));
+  ElementCount WidenResEC = WidenResVT.getVectorElementCount();
+  if (WidenResEC == OpVT.getVectorElementCount()) {
+    return DAG.getNode(N->getOpcode(), dl, WidenResVT, LHS, RHS);
+  }
+
+  return DAG.UnrollVectorOp(N, WidenResVT.getVectorNumElements());
+}
+
 SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(SDNode *N) {
   // Binary op widening, but with an extra operand that shouldn't be widened.
   SDLoc dl(N);
@@ -6208,6 +6321,11 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
     Res = WidenVecOp_EXTEND(N);
     break;
 
+  case ISD::SCMP:
+  case ISD::UCMP:
+    Res = WidenVecOp_CMP(N);
+    break;
+
   case ISD::FP_EXTEND:
   case ISD::STRICT_FP_EXTEND:
   case ISD::FP_ROUND:
@@ -6354,6 +6472,32 @@ SDValue DAGTypeLegalizer::WidenVecOp_EXTEND(SDNode *N) {
   }
 }
 
+SDValue DAGTypeLegalizer::WidenVecOp_CMP(SDNode *N) {
+  SDLoc dl(N);
+
+  EVT OpVT = N->getOperand(0).getValueType();
+  EVT ResVT = N->getValueType(0);
+  SDValue LHS = GetWidenedVector(N->getOperand(0));
+  SDValue RHS = GetWidenedVector(N->getOperand(1));
+
+  // 1. EXTRACT_SUBVECTOR
+  // 2. SIGN_EXTEND/ZERO_EXTEND
+  // 3. CMP
+  LHS = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, LHS,
+                    DAG.getVectorIdxConstant(0, dl));
+  RHS = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, RHS,
+                    DAG.getVectorIdxConstant(0, dl));
+
+  // At this point the result type is guaranteed to be valid, so we can use it
+  // as the operand type by extending it appropriately
+  ISD::NodeType ExtendOpcode =
+      N->getOpcode() == ISD::SCMP ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+  LHS = DAG.getNode(ExtendOpcode, dl, ResVT, LHS);
+  RHS = DAG.getNode(ExtendOpcode, dl, ResVT, RHS);
+
+  return DAG.getNode(N->getOpcode(), dl, ResVT, LHS, RHS);
+}
+
 SDValue DAGTypeLegalizer::WidenVecOp_UnrollVectorOp(SDNode *N) {
   // The result (and first input) is legal, but the second input is illegal.
   // We can't do much to fix that, so just unroll and let the extracts off of

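As a rough picture of the vector-splitting paths above: a [US]CMP whose
vector type must be split becomes two half-width nodes, and the
operand-splitting path then concatenates the halves back into the legal
result type. Illustrative IR, assuming <8 x i32> splits into two <4 x i32>
halves:

    %lo = call <4 x i32> @llvm.scmp(<4 x i32> %xlo, <4 x i32> %ylo)
    %hi = call <4 x i32> @llvm.scmp(<4 x i32> %xhi, <4 x i32> %yhi)
    ; SplitVecOp_CMP then rejoins %lo and %hi with CONCAT_VECTORS
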
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 8838cce9810f8..453bc7129b5b0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7220,6 +7220,20 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
     setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
     return;
   }
+  case Intrinsic::scmp: {
+    SDValue Op1 = getValue(I.getArgOperand(0));
+    SDValue Op2 = getValue(I.getArgOperand(1));
+    EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+    setValue(&I, DAG.getNode(ISD::SCMP, sdl, DestVT, Op1, Op2));
+    break;
+  }
+  case Intrinsic::ucmp: {
+    SDValue Op1 = getValue(I.getArgOperand(0));
+    SDValue Op2 = getValue(I.getArgOperand(1));
+    EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+    setValue(&I, DAG.getNode(ISD::UCMP, sdl, DestVT, Op1, Op2));
+    break;
+  }
   case Intrinsic::stacksave: {
     SDValue Op = getRoot();
     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 12a7b7f11778d..a7555d6d31f26 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -293,6 +293,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
   case ISD::SMAX:                       return "smax";
   case ISD::UMIN:                       return "umin";
   case ISD::UMAX:                       return "umax";
+  case ISD::SCMP:                       return "scmp";
+  case ISD::UCMP:                       return "ucmp";
 
   case ISD::FLDEXP:                     return "fldexp";
   case ISD::STRICT_FLDEXP:              return "strict_fldexp";

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 623d2e0a047ef..ad957aaa8f141 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -10358,6 +10358,27 @@ SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const {
   return DAG.getSelect(dl, VT, Overflow, Result, SumDiff);
 }
 
+SDValue TargetLowering::expandCMP(SDNode *Node, SelectionDAG &DAG) const {
+  unsigned Opcode = Node->getOpcode();
+  SDValue LHS = Node->getOperand(0);
+  SDValue RHS = Node->getOperand(1);
+  EVT VT = LHS.getValueType();
+  EVT ResVT = Node->getValueType(0);
+  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
+  SDLoc dl(Node);
+
+  auto LTPredicate = (Opcode == ISD::UCMP ? ISD::SETULT : ISD::SETLT);
+  auto GTPredicate = (Opcode == ISD::UCMP ? ISD::SETUGT : ISD::SETGT);
+
+  SDValue IsLT = DAG.getSetCC(dl, BoolVT, LHS, RHS, LTPredicate);
+  SDValue IsGT = DAG.getSetCC(dl, BoolVT, LHS, RHS, GTPredicate);
+  SDValue SelectZeroOrOne =
+      DAG.getSelect(dl, ResVT, IsGT, DAG.getConstant(1, dl, ResVT),
+                    DAG.getConstant(0, dl, ResVT));
+  return DAG.getSelect(dl, ResVT, IsLT, DAG.getConstant(-1, dl, ResVT),
+                       SelectZeroOrOne);
+}
+
 SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const {
   unsigned Opcode = Node->getOpcode();
   bool IsSigned = Opcode == ISD::SSHLSAT;

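The generic expansion above is built from two setcc nodes and two selects;
in IR terms it is equivalent to the following (signed form shown, the
unsigned form uses ult/ugt):

    %lt = icmp slt i32 %x, %y
    %gt = icmp sgt i32 %x, %y
    %t  = select i1 %gt, i8 1, i8 0
    %r  = select i1 %lt, i8 -1, i8 %t
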
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index de534994fa48c..ff684c7cb6bba 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -938,6 +938,9 @@ void TargetLoweringBase::initActions() {
     setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
                        Expand);
 
+    // [US]CMP default to expand
+    setOperationAction({ISD::UCMP, ISD::SCMP}, VT, Expand);
+
     // Halving adds
     setOperationAction(
         {ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS, ISD::AVGCEILU}, VT,

diff --git a/llvm/test/CodeGen/AArch64/scmp.ll b/llvm/test/CodeGen/AArch64/scmp.ll
new file mode 100644
index 0000000000000..a7abc5eadaff6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/scmp.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+
+define i8 @scmp.8.8(i8 %x, i8 %y) nounwind {
+; CHECK-LABEL: scmp.8.8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    cmp w8, w1, sxtb
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    csinv w0, w8, wzr, ge
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.scmp(i8 %x, i8 %y)
+  ret i8 %1
+}
+
+define i8 @scmp.8.16(i16 %x, i16 %y) nounwind {
+; CHECK-LABEL: scmp.8.16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth w8, w0
+; CHECK-NEXT:    cmp w8, w1, sxth
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    csinv w0, w8, wzr, ge
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.scmp(i16 %x, i16 %y)
+  ret i8 %1
+}
+
+define i8 @scmp.8.32(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: scmp.8.32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    csinv w0, w8, wzr, ge
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @scmp.8.64(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: scmp.8.64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    csinv w0, w8, wzr, ge
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.scmp(i64 %x, i64 %y)
+  ret i8 %1
+}
+
+define i8 @scmp.8.128(i128 %x, i128 %y) nounwind {
+; CHECK-LABEL: scmp.8.128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csinv w0, w8, wzr, ge
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.scmp(i128 %x, i128 %y)
+  ret i8 %1
+}
+
+define i32 @scmp.32.32(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: scmp.32.32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    csinv w0, w8, wzr, ge
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.scmp(i32 %x, i32 %y)
+  ret i32 %1
+}
+
+define i32 @scmp.32.64(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: scmp.32.64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    cset w8, gt
+; CHECK-NEXT:    csinv w0, w8, wzr, ge
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.scmp(i64 %x, i64 %y)
+  ret i32 %1
+}
+
+define i64 @scmp.64.64(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: scmp.64.64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    cset x8, gt
+; CHECK-NEXT:    csinv x0, x8, xzr, ge
+; CHECK-NEXT:    ret
+  %1 = call i64 @llvm.scmp(i64 %x, i64 %y)
+  ret i64 %1
+}

diff --git a/llvm/test/CodeGen/AArch64/ucmp.ll b/llvm/test/CodeGen/AArch64/ucmp.ll
new file mode 100644
index 0000000000000..39a32194147eb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ucmp.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+
+define i8 @ucmp.8.8(i8 %x, i8 %y) nounwind {
+; CHECK-LABEL: ucmp.8.8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, #0xff
+; CHECK-NEXT:    cmp w8, w1, uxtb
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    csinv w0, w8, wzr, hs
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
+  ret i8 %1
+}
+
+define i8 @ucmp.8.16(i16 %x, i16 %y) nounwind {
+; CHECK-LABEL: ucmp.8.16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, #0xffff
+; CHECK-NEXT:    cmp w8, w1, uxth
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    csinv w0, w8, wzr, hs
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
+  ret i8 %1
+}
+
+define i8 @ucmp.8.32(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: ucmp.8.32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    csinv w0, w8, wzr, hs
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @ucmp.8.64(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: ucmp.8.64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    csinv w0, w8, wzr, hs
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.ucmp(i64 %x, i64 %y)
+  ret i8 %1
+}
+
+define i8 @ucmp.8.128(i128 %x, i128 %y) nounwind {
+; CHECK-LABEL: ucmp.8.128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x2, x0
+; CHECK-NEXT:    sbcs xzr, x3, x1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp x0, x2
+; CHECK-NEXT:    sbcs xzr, x1, x3
+; CHECK-NEXT:    csinv w0, w8, wzr, hs
+; CHECK-NEXT:    ret
+  %1 = call i8 @llvm.ucmp(i128 %x, i128 %y)
+  ret i8 %1
+}
+
+define i32 @ucmp.32.32(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: ucmp.32.32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    csinv w0, w8, wzr, hs
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
+  ret i32 %1
+}
+
+define i32 @ucmp.32.64(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: ucmp.32.64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    csinv w0, w8, wzr, hs
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.ucmp(i64 %x, i64 %y)
+  ret i32 %1
+}
+
+define i64 @ucmp.64.64(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: ucmp.64.64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    cset x8, hi
+; CHECK-NEXT:    csinv x0, x8, xzr, hs
+; CHECK-NEXT:    ret
+  %1 = call i64 @llvm.ucmp(i64 %x, i64 %y)
+  ret i64 %1
+}

diff --git a/llvm/test/CodeGen/X86/scmp.ll b/llvm/test/CodeGen/X86/scmp.ll
new file mode 100644
index 0000000000000..55dc0d6059e05
--- /dev/null
+++ b/llvm/test/CodeGen/X86/scmp.ll
@@ -0,0 +1,2094 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+
+define i8 @scmp.8.8(i8 %x, i8 %y) nounwind {
+; X64-LABEL: scmp.8.8:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpb %sil, %dil
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp.8.8:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB0_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB0_2:
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.scmp(i8 %x, i8 %y)
+  ret i8 %1
+}
+
+define i8 @scmp.8.16(i16 %x, i16 %y) nounwind {
+; X64-LABEL: scmp.8.16:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpw %si, %di
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp.8.16:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpw {{[0-9]+}}(%esp), %ax
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB1_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB1_2:
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.scmp(i16 %x, i16 %y)
+  ret i8 %1
+}
+
+define i8 @scmp.8.32(i32 %x, i32 %y) nounwind {
+; X64-LABEL: scmp.8.32:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp.8.32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB2_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB2_2:
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @scmp.8.64(i64 %x, i64 %y) nounwind {
+; X64-LABEL: scmp.8.64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp.8.64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %eax, %esi
+; X86-NEXT:    movl %edi, %ecx
+; X86-NEXT:    sbbl %edx, %ecx
+; X86-NEXT:    setl %cl
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB3_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB3_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.scmp(i64 %x, i64 %y)
+  ret i8 %1
+}
+
+define i8 @scmp.8.128(i128 %x, i128 %y) nounwind {
+; X64-LABEL: scmp.8.128:
+; X64:       # %bb.0:
+; X64-NEXT:    cmpq %rdi, %rdx
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    sbbq %rsi, %rax
+; X64-NEXT:    setl %al
+; X64-NEXT:    movzbl %al, %r8d
+; X64-NEXT:    cmpq %rdx, %rdi
+; X64-NEXT:    sbbq %rcx, %rsi
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovgel %r8d, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp.8.128:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %ebp, %ebx
+; X86-NEXT:    sbbl %edx, %ebx
+; X86-NEXT:    movl %ecx, %ebx
+; X86-NEXT:    sbbl %eax, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, %ecx
+; X86-NEXT:    sbbl %ebx, %ecx
+; X86-NEXT:    setl %cl
+; X86-NEXT:    cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    sbbl %ebp, %edx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %esi, %ebx
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB4_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB4_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.scmp(i128 %x, i128 %y)
+  ret i8 %1
+}
+
+define i32 @scmp.32.32(i32 %x, i32 %y) nounwind {
+; X64-LABEL: scmp.32.32:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp.32.32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setg %dl
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    jl .LBB5_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %dl, %cl
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB5_2:
+; X86-NEXT:    retl
+  %1 = call i32 @llvm.scmp(i32 %x, i32 %y)
+  ret i32 %1
+}
+
+define i32 @scmp.32.64(i64 %x, i64 %y) nounwind {
+; X64-LABEL: scmp.32.64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp.32.64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %eax, %esi
+; X86-NEXT:    movl %edi, %ecx
+; X86-NEXT:    sbbl %edx, %ecx
+; X86-NEXT:    setl %cl
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    jl .LBB6_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movzbl %cl, %eax
+; X86-NEXT:  .LBB6_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+  %1 = call i32 @llvm.scmp(i64 %x, i64 %y)
+  ret i32 %1
+}
+
+define i64 @scmp.64.64(i64 %x, i64 %y) nounwind {
+; X64-LABEL: scmp.64.64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movq $-1, %rax
+; X64-NEXT:    cmovgeq %rcx, %rax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp.64.64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %eax, %esi
+; X86-NEXT:    movl %edi, %ecx
+; X86-NEXT:    sbbl %edx, %ecx
+; X86-NEXT:    setl %cl
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    jl .LBB7_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movzbl %cl, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:  .LBB7_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+  %1 = call i64 @llvm.scmp(i64 %x, i64 %y)
+  ret i64 %1
+}
+
+define i4 @scmp_narrow_result(i32 %x, i32 %y) nounwind {
+; X64-LABEL: scmp_narrow_result:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_narrow_result:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB8_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB8_2:
+; X86-NEXT:    retl
+  %1 = call i4 @llvm.scmp(i32 %x, i32 %y)
+  ret i4 %1
+}
+
+define i8 @scmp_narrow_op(i62 %x, i62 %y) nounwind {
+; X64-LABEL: scmp_narrow_op:
+; X64:       # %bb.0:
+; X64-NEXT:    shlq $2, %rsi
+; X64-NEXT:    sarq $2, %rsi
+; X64-NEXT:    shlq $2, %rdi
+; X64-NEXT:    sarq $2, %rdi
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_narrow_op:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shll $2, %eax
+; X86-NEXT:    sarl $2, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    shll $2, %edi
+; X86-NEXT:    sarl $2, %edi
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    setl %dl
+; X86-NEXT:    cmpl %esi, %ecx
+; X86-NEXT:    sbbl %edi, %eax
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB9_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:  .LBB9_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.scmp(i62 %x, i62 %y)
+  ret i8 %1
+}
+
+define i141 @scmp_wide_result(i32 %x, i32 %y) nounwind {
+; X64-LABEL: scmp_wide_result:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movq $-1, %rax
+; X64-NEXT:    cmovgeq %rcx, %rax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_wide_result:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    setg %bl
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jl .LBB10_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    xorl %esi, %esi
+; X86-NEXT:    movb %bl, %cl
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:  .LBB10_2:
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %edx, (%eax)
+; X86-NEXT:    movl $0, 12(%eax)
+; X86-NEXT:    movl $0, 8(%eax)
+; X86-NEXT:    movw $0, 16(%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl $4
+  %1 = call i141 @llvm.scmp(i32 %x, i32 %y)
+  ret i141 %1
+}
+
+define i8 @scmp_wide_op(i109 %x, i109 %y) nounwind {
+; X64-LABEL: scmp_wide_op:
+; X64:       # %bb.0:
+; X64-NEXT:    shlq $19, %rsi
+; X64-NEXT:    sarq $19, %rsi
+; X64-NEXT:    shlq $19, %rcx
+; X64-NEXT:    sarq $19, %rcx
+; X64-NEXT:    cmpq %rdi, %rdx
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    sbbq %rsi, %rax
+; X64-NEXT:    setl %al
+; X64-NEXT:    movzbl %al, %r8d
+; X64-NEXT:    cmpq %rdx, %rdi
+; X64-NEXT:    sbbq %rcx, %rsi
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovgel %r8d, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_wide_op:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shll $19, %eax
+; X86-NEXT:    sarl $19, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    shll $19, %ecx
+; X86-NEXT:    sarl $19, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    sbbl %edx, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %edi, %esi
+; X86-NEXT:    sbbl %ebx, %esi
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    sbbl %eax, %esi
+; X86-NEXT:    setl {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %ebp, {{[0-9]+}}(%esp)
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    sbbl %edi, %ebx
+; X86-NEXT:    sbbl %ecx, %eax
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB11_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X86-NEXT:  .LBB11_2:
+; X86-NEXT:    addl $4, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.scmp(i109 %x, i109 %y)
+  ret i8 %1
+}
+
+define i41 @scmp_uncommon_types(i7 %x, i7 %y) nounwind {
+; X64-LABEL: scmp_uncommon_types:
+; X64:       # %bb.0:
+; X64-NEXT:    addb %sil, %sil
+; X64-NEXT:    sarb %sil
+; X64-NEXT:    addb %dil, %dil
+; X64-NEXT:    sarb %dil
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpb %sil, %dil
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movq $-1, %rax
+; X64-NEXT:    cmovgeq %rcx, %rax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_uncommon_types:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addb %al, %al
+; X86-NEXT:    sarb %al
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    addb %dl, %dl
+; X86-NEXT:    sarb %dl
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    cmpb %al, %dl
+; X86-NEXT:    setg %bl
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    jl .LBB12_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movb %bl, %cl
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB12_2:
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+  %1 = call i41 @llvm.scmp(i7 %x, i7 %y)
+  ret i41 %1
+}
+
+define <4 x i32> @scmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind {
+; X64-LABEL: scmp_normal_vectors:
+; X64:       # %bb.0:
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; X64-NEXT:    movd %xmm2, %eax
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; X64-NEXT:    movd %xmm2, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; X64-NEXT:    movd %xmm3, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; X64-NEXT:    movd %xmm3, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm3
+; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm0
+; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X64-NEXT:    movdqa %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_normal_vectors:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    jl .LBB13_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:  .LBB13_2:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jl .LBB13_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:  .LBB13_4:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    jl .LBB13_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:  .LBB13_6:
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    setg %cl
+; X86-NEXT:    jl .LBB13_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:  .LBB13_8:
+; X86-NEXT:    movl %edx, 12(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %ebp, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+  %1 = call <4 x i32> @llvm.scmp(<4 x i32> %x, <4 x i32> %y)
+  ret <4 x i32> %1
+}
+
+define <4 x i8> @scmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind {
+; X64-LABEL: scmp_narrow_vec_result:
+; X64:       # %bb.0:
+; X64-NEXT:    movd %xmm1, %eax
+; X64-NEXT:    movd %xmm0, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movzbl %dl, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
+; X64-NEXT:    movd %xmm2, %edx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; X64-NEXT:    movd %xmm2, %esi
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpl %edx, %esi
+; X64-NEXT:    setg %dil
+; X64-NEXT:    cmovll %eax, %edi
+; X64-NEXT:    movzbl %dil, %edx
+; X64-NEXT:    shll $8, %edx
+; X64-NEXT:    orl %ecx, %edx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; X64-NEXT:    movd %xmm2, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; X64-NEXT:    movd %xmm2, %esi
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpl %ecx, %esi
+; X64-NEXT:    setg %dil
+; X64-NEXT:    cmovll %eax, %edi
+; X64-NEXT:    movzbl %dil, %ecx
+; X64-NEXT:    shll $16, %ecx
+; X64-NEXT:    orl %edx, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X64-NEXT:    movd %xmm0, %esi
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpl %edx, %esi
+; X64-NEXT:    setg %dil
+; X64-NEXT:    cmovll %eax, %edi
+; X64-NEXT:    shll $24, %edi
+; X64-NEXT:    orl %ecx, %edi
+; X64-NEXT:    movd %edi, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_narrow_vec_result:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    setg %ch
+; X86-NEXT:    movb $-1, %dl
+; X86-NEXT:    movb $-1, %cl
+; X86-NEXT:    jl .LBB14_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %ch, %cl
+; X86-NEXT:  .LBB14_2:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    setg %al
+; X86-NEXT:    movb $-1, %ch
+; X86-NEXT:    jl .LBB14_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %al, %ch
+; X86-NEXT:  .LBB14_4:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    setg %bl
+; X86-NEXT:    movb $-1, %dh
+; X86-NEXT:    jl .LBB14_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %bl, %dh
+; X86-NEXT:  .LBB14_6:
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    setg %bl
+; X86-NEXT:    jl .LBB14_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %bl, %dl
+; X86-NEXT:  .LBB14_8:
+; X86-NEXT:    movb %dl, 3(%eax)
+; X86-NEXT:    movb %dh, 2(%eax)
+; X86-NEXT:    movb %ch, 1(%eax)
+; X86-NEXT:    movb %cl, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl $4
+  %1 = call <4 x i8> @llvm.scmp(<4 x i32> %x, <4 x i32> %y)
+  ret <4 x i8> %1
+}
+
+define <4 x i32> @scmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind {
+; X64-LABEL: scmp_narrow_vec_op:
+; X64:       # %bb.0:
+; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; X64-NEXT:    psrad $24, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; X64-NEXT:    movd %xmm2, %eax
+; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X64-NEXT:    psrad $24, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[3,3,3,3]
+; X64-NEXT:    movd %xmm0, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; X64-NEXT:    movd %xmm3, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
+; X64-NEXT:    movd %xmm3, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm3
+; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    movd %xmm2, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm1
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_narrow_vec_op:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    jl .LBB15_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:  .LBB15_2:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    setg %bl
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jl .LBB15_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %bl, %al
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:  .LBB15_4:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    jl .LBB15_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:  .LBB15_6:
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    setg %cl
+; X86-NEXT:    jl .LBB15_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:  .LBB15_8:
+; X86-NEXT:    movl %edx, 12(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %ebp, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+  %1 = call <4 x i32> @llvm.scmp(<4 x i8> %x, <4 x i8> %y)
+  ret <4 x i32> %1
+}
+
+define <16 x i32> @scmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind {
+; X64-LABEL: scmp_wide_vec_result:
+; X64:       # %bb.0:
+; X64-NEXT:    movdqa %xmm1, %xmm3
+; X64-NEXT:    movdqa %xmm0, %xmm2
+; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; X64-NEXT:    psrad $24, %xmm5
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[3,3,3,3]
+; X64-NEXT:    movd %xmm0, %eax
+; X64-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; X64-NEXT:    psrad $24, %xmm6
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[3,3,3,3]
+; X64-NEXT:    movd %xmm0, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm7
+; X64-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    movd %xmm6, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,1,1]
+; X64-NEXT:    movd %xmm5, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm5
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm7[0]
+; X64-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; X64-NEXT:    psrad $24, %xmm5
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
+; X64-NEXT:    psrad $24, %xmm4
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3]
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
+; X64-NEXT:    movd %xmm6, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[2,3,2,3]
+; X64-NEXT:    movd %xmm6, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm6
+; X64-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    movd %xmm4, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
+; X64-NEXT:    movd %xmm4, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm4
+; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm6[0]
+; X64-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; X64-NEXT:    psrad $24, %xmm5
+; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[3,3,3,3]
+; X64-NEXT:    movd %xmm4, %ecx
+; X64-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; X64-NEXT:    psrad $24, %xmm6
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[3,3,3,3]
+; X64-NEXT:    movd %xmm2, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm7
+; X64-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    movd %xmm6, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,1,1]
+; X64-NEXT:    movd %xmm5, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm5
+; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm7[0]
+; X64-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; X64-NEXT:    psrad $24, %xmm5
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[3,3,3,3]
+; X64-NEXT:    movd %xmm3, %ecx
+; X64-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
+; X64-NEXT:    psrad $24, %xmm4
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[3,3,3,3]
+; X64-NEXT:    movd %xmm3, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm3
+; X64-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
+; X64-NEXT:    movd %xmm6, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[2,3,2,3]
+; X64-NEXT:    movd %xmm6, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm6
+; X64-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    movd %xmm4, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm3
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
+; X64-NEXT:    movd %xmm4, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %eax, %esi
+; X64-NEXT:    movd %esi, %xmm4
+; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm6[0]
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_wide_vec_result:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $48, %esp
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %dl
+; X86-NEXT:    setg %dl
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jl .LBB16_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %dl, %cl
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:  .LBB16_2:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    jl .LBB16_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, %edi
+; X86-NEXT:  .LBB16_4:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    jl .LBB16_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:  .LBB16_6:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    jl .LBB16_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, %ebp
+; X86-NEXT:  .LBB16_8:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jl .LBB16_10
+; X86-NEXT:  # %bb.9:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_10:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jl .LBB16_12
+; X86-NEXT:  # %bb.11:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_12:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jl .LBB16_14
+; X86-NEXT:  # %bb.13:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_14:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jl .LBB16_16
+; X86-NEXT:  # %bb.15:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_16:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jl .LBB16_18
+; X86-NEXT:  # %bb.17:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_18:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, (%esp) # 4-byte Folded Spill
+; X86-NEXT:    jl .LBB16_20
+; X86-NEXT:  # %bb.19:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X86-NEXT:  .LBB16_20:
+; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    jl .LBB16_22
+; X86-NEXT:  # %bb.21:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:  .LBB16_22:
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    jl .LBB16_24
+; X86-NEXT:  # %bb.23:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:  .LBB16_24:
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %dl
+; X86-NEXT:    setg %ah
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    jl .LBB16_26
+; X86-NEXT:  # %bb.25:
+; X86-NEXT:    movb %ah, %bl
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:  .LBB16_26:
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jl .LBB16_28
+; X86-NEXT:  # %bb.27:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:  .LBB16_28:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    jl .LBB16_30
+; X86-NEXT:  # %bb.29:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:  .LBB16_30:
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    setg %cl
+; X86-NEXT:    jl .LBB16_32
+; X86-NEXT:  # %bb.31:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_32:
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 60(%eax)
+; X86-NEXT:    movl %edi, 56(%eax)
+; X86-NEXT:    movl %esi, 52(%eax)
+; X86-NEXT:    movl %edx, 48(%eax)
+; X86-NEXT:    movl %ebp, 44(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 40(%eax)
+; X86-NEXT:    movl (%esp), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 36(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 32(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 28(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 24(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 20(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 16(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 12(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 8(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 4(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, (%eax)
+; X86-NEXT:    addl $48, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+  %1 = call <16 x i32> @llvm.scmp(<16 x i8> %x, <16 x i8> %y)
+  ret <16 x i32> %1
+}
+
+define <16 x i8> @scmp_wide_vec_op(<16 x i64> %x, <16 x i64> %y) nounwind {
+; X64-LABEL: scmp_wide_vec_op:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %xmm7, %rax
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovll %eax, %ecx
+; X64-NEXT:    movd %ecx, %xmm8
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
+; X64-NEXT:    movq %xmm7, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm7
+; X64-NEXT:    punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; X64-NEXT:    movq %xmm6, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm7
+; X64-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
+; X64-NEXT:    movq %xmm6, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm6
+; X64-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-NEXT:    movq %xmm5, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm6
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
+; X64-NEXT:    movq %xmm5, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm5
+; X64-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; X64-NEXT:    movq %xmm4, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm5
+; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
+; X64-NEXT:    movq %xmm4, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm4
+; X64-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X64-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
+; X64-NEXT:    movq %xmm3, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm4
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
+; X64-NEXT:    movq %xmm3, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm3
+; X64-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; X64-NEXT:    movq %xmm2, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm3
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; X64-NEXT:    movq %xmm2, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm2
+; X64-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X64-NEXT:    movq %xmm1, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; X64-NEXT:    movq %xmm1, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm1
+; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; X64-NEXT:    movq %xmm0, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; X64-NEXT:    movq %xmm0, %rcx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovll %eax, %edx
+; X64-NEXT:    movd %edx, %xmm0
+; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_wide_vec_op:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $16, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %eax, %edx
+; X86-NEXT:    movl %edi, %ecx
+; X86-NEXT:    sbbl %esi, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %edx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    sbbl %edi, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movb $-1, %bh
+; X86-NEXT:    jl .LBB17_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %bl, %bh
+; X86-NEXT:  .LBB17_2:
+; X86-NEXT:    cmpl %ecx, %edx
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    sbbl %eax, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %edx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_4:
+; X86-NEXT:    cmpl %edx, %eax
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    sbbl %ecx, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %eax, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %esi, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_6:
+; X86-NEXT:    cmpl %edx, %ecx
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    sbbl %eax, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %ecx, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_8:
+; X86-NEXT:    cmpl %edx, %eax
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    sbbl %ecx, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %eax, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %esi, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_10
+; X86-NEXT:  # %bb.9:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_10:
+; X86-NEXT:    cmpl %edx, %ecx
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    sbbl %eax, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %ecx, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_12
+; X86-NEXT:  # %bb.11:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_12:
+; X86-NEXT:    cmpl %edx, %eax
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    sbbl %ecx, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %eax, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %esi, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_14
+; X86-NEXT:  # %bb.13:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_14:
+; X86-NEXT:    cmpl %edx, %ecx
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    sbbl %eax, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %ecx, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_16
+; X86-NEXT:  # %bb.15:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_16:
+; X86-NEXT:    cmpl %edx, %eax
+; X86-NEXT:    movl %edi, %esi
+; X86-NEXT:    sbbl %ecx, %esi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %eax, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %edi, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_18
+; X86-NEXT:  # %bb.17:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_18:
+; X86-NEXT:    cmpl %esi, %ecx
+; X86-NEXT:    movl %edx, %edi
+; X86-NEXT:    sbbl %eax, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl %edx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_20
+; X86-NEXT:  # %bb.19:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_20:
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    movl %edx, %edi
+; X86-NEXT:    sbbl %ecx, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %eax, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %edx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jl .LBB17_22
+; X86-NEXT:  # %bb.21:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_22:
+; X86-NEXT:    cmpl %esi, %edx
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    sbbl %eax, %edi
+; X86-NEXT:    setl %bl
+; X86-NEXT:    cmpl %edx, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movb $-1, %cl
+; X86-NEXT:    jl .LBB17_24
+; X86-NEXT:  # %bb.23:
+; X86-NEXT:    movl %ebx, %ecx
+; X86-NEXT:  .LBB17_24:
+; X86-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movb %bh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    cmpl %edi, %eax
+; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    sbbl %esi, %ecx
+; X86-NEXT:    setl %ch
+; X86-NEXT:    cmpl %eax, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %edx, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movb $-1, %cl
+; X86-NEXT:    jl .LBB17_26
+; X86-NEXT:  # %bb.25:
+; X86-NEXT:    movb %ch, %cl
+; X86-NEXT:  .LBB17_26:
+; X86-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    cmpl %edi, %esi
+; X86-NEXT:    movl %ebp, %ecx
+; X86-NEXT:    sbbl %eax, %ecx
+; X86-NEXT:    setl %dh
+; X86-NEXT:    cmpl %esi, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %ebp, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB17_28
+; X86-NEXT:  # %bb.27:
+; X86-NEXT:    movb %dh, %al
+; X86-NEXT:  .LBB17_28:
+; X86-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl %edi, %ebx
+; X86-NEXT:    movl %ebp, %edx
+; X86-NEXT:    sbbl %esi, %ebp
+; X86-NEXT:    setl %cl
+; X86-NEXT:    cmpl %ebx, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    sbbl %edx, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movb $-1, %dh
+; X86-NEXT:    jl .LBB17_30
+; X86-NEXT:  # %bb.29:
+; X86-NEXT:    movb %cl, %dh
+; X86-NEXT:  .LBB17_30:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl %ebx, %ecx
+; X86-NEXT:    movl %ebp, %esi
+; X86-NEXT:    sbbl %edi, %esi
+; X86-NEXT:    setl %dl
+; X86-NEXT:    cmpl %ecx, %ebx
+; X86-NEXT:    sbbl %ebp, %edi
+; X86-NEXT:    movb $-1, %bl
+; X86-NEXT:    jl .LBB17_32
+; X86-NEXT:  # %bb.31:
+; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:  .LBB17_32:
+; X86-NEXT:    movb %bl, 15(%eax)
+; X86-NEXT:    movb %dh, 14(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 13(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 12(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 11(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 10(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 9(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 8(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 7(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 6(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 5(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 4(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 3(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 2(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 1(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, (%eax)
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+  %1 = call <16 x i8> @llvm.scmp(<16 x i64> %x, <16 x i64> %y)
+  ret <16 x i8> %1
+}
+
+define <7 x i117> @scmp_uncommon_vectors(<7 x i7> %x, <7 x i7> %y) nounwind {
+; X64-LABEL: scmp_uncommon_vectors:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbp
+; X64-NEXT:    pushq %r15
+; X64-NEXT:    pushq %r14
+; X64-NEXT:    pushq %r12
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %r10d
+; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %ebx
+; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %ebp
+; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %r14d
+; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %r15d
+; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %r12d
+; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %r11d
+; X64-NEXT:    addb %r11b, %r11b
+; X64-NEXT:    sarb %r11b
+; X64-NEXT:    addb %dl, %dl
+; X64-NEXT:    sarb %dl
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpb %r11b, %dl
+; X64-NEXT:    setg %dil
+; X64-NEXT:    movq $-1, %r11
+; X64-NEXT:    cmovlq %r11, %rdi
+; X64-NEXT:    addb %r12b, %r12b
+; X64-NEXT:    sarb %r12b
+; X64-NEXT:    addb %cl, %cl
+; X64-NEXT:    sarb %cl
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpb %r12b, %cl
+; X64-NEXT:    setg %dl
+; X64-NEXT:    cmovlq %r11, %rdx
+; X64-NEXT:    addb %r15b, %r15b
+; X64-NEXT:    sarb %r15b
+; X64-NEXT:    addb %r8b, %r8b
+; X64-NEXT:    sarb %r8b
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpb %r15b, %r8b
+; X64-NEXT:    setg %cl
+; X64-NEXT:    cmovlq %r11, %rcx
+; X64-NEXT:    addb %r14b, %r14b
+; X64-NEXT:    sarb %r14b
+; X64-NEXT:    addb %r9b, %r9b
+; X64-NEXT:    sarb %r9b
+; X64-NEXT:    xorl %r8d, %r8d
+; X64-NEXT:    cmpb %r14b, %r9b
+; X64-NEXT:    setg %r8b
+; X64-NEXT:    cmovlq %r11, %r8
+; X64-NEXT:    addb %bpl, %bpl
+; X64-NEXT:    sarb %bpl
+; X64-NEXT:    addb %sil, %sil
+; X64-NEXT:    sarb %sil
+; X64-NEXT:    xorl %r9d, %r9d
+; X64-NEXT:    cmpb %bpl, %sil
+; X64-NEXT:    setg %r9b
+; X64-NEXT:    cmovlq %r11, %r9
+; X64-NEXT:    addb %bl, %bl
+; X64-NEXT:    sarb %bl
+; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %ebp
+; X64-NEXT:    addb %bpl, %bpl
+; X64-NEXT:    sarb %bpl
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpb %bl, %bpl
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovlq %r11, %rsi
+; X64-NEXT:    addb %r10b, %r10b
+; X64-NEXT:    sarb %r10b
+; X64-NEXT:    movzbl {{[0-9]+}}(%rsp), %ebx
+; X64-NEXT:    addb %bl, %bl
+; X64-NEXT:    sarb %bl
+; X64-NEXT:    xorl %r14d, %r14d
+; X64-NEXT:    cmpb %r10b, %bl
+; X64-NEXT:    setg %r14b
+; X64-NEXT:    cmovlq %r11, %r14
+; X64-NEXT:    movq %r14, %r10
+; X64-NEXT:    shrq $2, %r10
+; X64-NEXT:    movq %r10, 88(%rax)
+; X64-NEXT:    movq %rsi, %r10
+; X64-NEXT:    shlq $9, %r10
+; X64-NEXT:    movq %r10, 72(%rax)
+; X64-NEXT:    movq %r9, (%rax)
+; X64-NEXT:    shlq $62, %r14
+; X64-NEXT:    shrq $55, %rsi
+; X64-NEXT:    orq %r14, %rsi
+; X64-NEXT:    movq %rsi, 80(%rax)
+; X64-NEXT:    movq %r8, %rsi
+; X64-NEXT:    shrq $44, %rsi
+; X64-NEXT:    movq %rsi, 64(%rax)
+; X64-NEXT:    shlq $20, %r8
+; X64-NEXT:    movq %r8, 56(%rax)
+; X64-NEXT:    movq %rcx, %rsi
+; X64-NEXT:    shrq $33, %rsi
+; X64-NEXT:    movq %rsi, 48(%rax)
+; X64-NEXT:    shlq $31, %rcx
+; X64-NEXT:    movq %rcx, 40(%rax)
+; X64-NEXT:    movq %rdx, %rcx
+; X64-NEXT:    shrq $22, %rcx
+; X64-NEXT:    movq %rcx, 32(%rax)
+; X64-NEXT:    shlq $42, %rdx
+; X64-NEXT:    movq %rdx, 24(%rax)
+; X64-NEXT:    movq %rdi, %rcx
+; X64-NEXT:    shrq $11, %rcx
+; X64-NEXT:    movq %rcx, 16(%rax)
+; X64-NEXT:    shlq $53, %rdi
+; X64-NEXT:    movq %rdi, 8(%rax)
+; X64-NEXT:    movb $0, 102(%rax)
+; X64-NEXT:    movw $0, 100(%rax)
+; X64-NEXT:    movl $0, 96(%rax)
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    popq %r12
+; X64-NEXT:    popq %r14
+; X64-NEXT:    popq %r15
+; X64-NEXT:    popq %rbp
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_uncommon_vectors:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $44, %esp
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb %al, (%esp) # 1-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    addb %cl, %cl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %dh
+; X86-NEXT:    addb %dh, %dh
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NEXT:    addb %dl, %dl
+; X86-NEXT:    sarb %dl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    addb %ch, %ch
+; X86-NEXT:    sarb %ch
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addb %al, %al
+; X86-NEXT:    sarb %al
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %ah
+; X86-NEXT:    addb %ah, %ah
+; X86-NEXT:    sarb %ah
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb %al, %ah
+; X86-NEXT:    setg %al
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    jl .LBB18_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:    xorl %edi, %edi
+; X86-NEXT:  .LBB18_2:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %bh
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X86-NEXT:    addb %al, %al
+; X86-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movzbl (%esp), %eax # 1-byte Folded Reload
+; X86-NEXT:    addb %al, %al
+; X86-NEXT:    movb %al, (%esp) # 1-byte Spill
+; X86-NEXT:    sarb %cl
+; X86-NEXT:    sarb %dh
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpb %dl, %ch
+; X86-NEXT:    setg %dl
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jl .LBB18_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %dl, %al
+; X86-NEXT:    movl %eax, %ebp
+; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:  .LBB18_4:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; X86-NEXT:    addb %bl, %bl
+; X86-NEXT:    addb %bh, %bh
+; X86-NEXT:    sarb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    sarb (%esp) # 1-byte Folded Spill
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpb %cl, %dh
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jl .LBB18_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %cl, %al
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:  .LBB18_6:
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %dh
+; X86-NEXT:    addb %ch, %ch
+; X86-NEXT:    addb %dl, %dl
+; X86-NEXT:    sarb %bl
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    sarb %bh
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    movb (%esp), %bl # 1-byte Reload
+; X86-NEXT:    cmpb {{[-0-9]+}}(%e{{[sb]}}p), %bl # 1-byte Folded Reload
+; X86-NEXT:    setg %bl
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jl .LBB18_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %bl, %al
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:  .LBB18_8:
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    addb %cl, %cl
+; X86-NEXT:    addb %dh, %dh
+; X86-NEXT:    sarb %ch
+; X86-NEXT:    sarb %dl
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpb {{[-0-9]+}}(%e{{[sb]}}p), %bh # 1-byte Folded Reload
+; X86-NEXT:    setg %bl
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    jl .LBB18_10
+; X86-NEXT:  # %bb.9:
+; X86-NEXT:    movb %bl, %al
+; X86-NEXT:    movl %eax, %edi
+; X86-NEXT:    xorl %ebp, %ebp
+; X86-NEXT:  .LBB18_10:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sarb %cl
+; X86-NEXT:    sarb %dh
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb %ch, %dl
+; X86-NEXT:    setg %dl
+; X86-NEXT:    movl $-1, (%esp) # 4-byte Folded Spill
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jl .LBB18_12
+; X86-NEXT:  # %bb.11:
+; X86-NEXT:    movb %dl, %bl
+; X86-NEXT:    movl %ebx, (%esp) # 4-byte Spill
+; X86-NEXT:    xorl %esi, %esi
+; X86-NEXT:  .LBB18_12:
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb %cl, %dh
+; X86-NEXT:    setg %cl
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    jl .LBB18_14
+; X86-NEXT:  # %bb.13:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:  .LBB18_14:
+; X86-NEXT:    movl %edx, 4(%eax)
+; X86-NEXT:    movl %esi, %ecx
+; X86-NEXT:    shrl $2, %ecx
+; X86-NEXT:    movl %ecx, 92(%eax)
+; X86-NEXT:    movl %ebp, %ecx
+; X86-NEXT:    shrl $23, %ecx
+; X86-NEXT:    movl %ecx, 80(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    movl %edi, %ecx
+; X86-NEXT:    shrl $12, %ecx
+; X86-NEXT:    movl %ecx, 64(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    movl %ebx, %ecx
+; X86-NEXT:    shrl %ecx
+; X86-NEXT:    movl %ecx, 48(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    shrl $22, %ecx
+; X86-NEXT:    movl %ecx, 36(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    shrl $11, %ecx
+; X86-NEXT:    movl %ecx, 20(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, (%eax)
+; X86-NEXT:    movl (%esp), %ecx # 4-byte Reload
+; X86-NEXT:    shldl $30, %ecx, %esi
+; X86-NEXT:    movl %esi, 88(%eax)
+; X86-NEXT:    shll $30, %ecx
+; X86-NEXT:    movl %ecx, 84(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    shldl $9, %ecx, %ebp
+; X86-NEXT:    movl %ebp, 76(%eax)
+; X86-NEXT:    shll $9, %ecx
+; X86-NEXT:    movl %ecx, 72(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    shldl $20, %ecx, %edi
+; X86-NEXT:    movl %edi, 60(%eax)
+; X86-NEXT:    shll $20, %ecx
+; X86-NEXT:    movl %ecx, 56(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    shldl $31, %ecx, %ebx
+; X86-NEXT:    movl %ebx, 44(%eax)
+; X86-NEXT:    shll $31, %ecx
+; X86-NEXT:    movl %ecx, 40(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    shldl $10, %ecx, %esi
+; X86-NEXT:    movl %esi, 32(%eax)
+; X86-NEXT:    shll $10, %ecx
+; X86-NEXT:    movl %ecx, 28(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    shldl $21, %ecx, %edx
+; X86-NEXT:    movl %edx, 16(%eax)
+; X86-NEXT:    shll $21, %ecx
+; X86-NEXT:    movl %ecx, 12(%eax)
+; X86-NEXT:    movb $0, 102(%eax)
+; X86-NEXT:    movw $0, 100(%eax)
+; X86-NEXT:    movl $0, 96(%eax)
+; X86-NEXT:    movl $0, 68(%eax)
+; X86-NEXT:    movl $0, 52(%eax)
+; X86-NEXT:    movl $0, 24(%eax)
+; X86-NEXT:    movl $0, 8(%eax)
+; X86-NEXT:    addl $44, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+  %1 = call <7 x i117> @llvm.scmp(<7 x i7> %x, <7 x i7> %y)
+  ret <7 x i117> %1
+}
+
+define <1 x i3> @scmp_scalarize(<1 x i33> %x, <1 x i33> %y) nounwind {
+; X64-LABEL: scmp_scalarize:
+; X64:       # %bb.0:
+; X64-NEXT:    shlq $31, %rsi
+; X64-NEXT:    sarq $31, %rsi
+; X64-NEXT:    shlq $31, %rdi
+; X64-NEXT:    sarq $31, %rdi
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    setg %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_scalarize:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    negl %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    andl $1, %edi
+; X86-NEXT:    negl %edi
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    setl %dl
+; X86-NEXT:    cmpl %esi, %ecx
+; X86-NEXT:    sbbl %edi, %eax
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB19_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:  .LBB19_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+  %1 = call <1 x i3> @llvm.scmp(<1 x i33> %x, <1 x i33> %y)
+  ret <1 x i3> %1
+}
+
+define <2 x i8> @scmp_bool_operands(<2 x i1> %x, <2 x i1> %y) nounwind {
+; X64-LABEL: scmp_bool_operands:
+; X64:       # %bb.0:
+; X64-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
+; X64-NEXT:    andb $1, %cl
+; X64-NEXT:    negb %cl
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
+; X64-NEXT:    andb $1, %sil
+; X64-NEXT:    negb %sil
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpb %cl, %sil
+; X64-NEXT:    setg %dil
+; X64-NEXT:    movl $255, %ecx
+; X64-NEXT:    cmovll %ecx, %edi
+; X64-NEXT:    shll $8, %edi
+; X64-NEXT:    andb $1, %al
+; X64-NEXT:    negb %al
+; X64-NEXT:    andb $1, %dl
+; X64-NEXT:    negb %dl
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpb %al, %dl
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %ecx, %esi
+; X64-NEXT:    movzbl %sil, %eax
+; X64-NEXT:    orl %edi, %eax
+; X64-NEXT:    movd %eax, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_bool_operands:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    andb $1, %cl
+; X86-NEXT:    negb %cl
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %ah
+; X86-NEXT:    andb $1, %ah
+; X86-NEXT:    negb %ah
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    andb $1, %al
+; X86-NEXT:    negb %al
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    andb $1, %dl
+; X86-NEXT:    negb %dl
+; X86-NEXT:    cmpb %al, %dl
+; X86-NEXT:    setg %ch
+; X86-NEXT:    movb $-1, %dl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jl .LBB20_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %ch, %al
+; X86-NEXT:  .LBB20_2:
+; X86-NEXT:    cmpb %cl, %ah
+; X86-NEXT:    setg %cl
+; X86-NEXT:    jl .LBB20_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:  .LBB20_4:
+; X86-NEXT:    retl
+  %1 = call <2 x i8> @llvm.scmp(<2 x i1> %x, <2 x i1> %y)
+  ret <2 x i8> %1
+}
+
+define <2 x i16> @scmp_ret_wider_than_operands(<2 x i8> %x, <2 x i8> %y) nounwind {
+; X64-LABEL: scmp_ret_wider_than_operands:
+; X64:       # %bb.0:
+; X64-NEXT:    movd %xmm1, %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    shrl $8, %ecx
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    movl %edx, %esi
+; X64-NEXT:    shrl $8, %esi
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpb %cl, %sil
+; X64-NEXT:    setg %dil
+; X64-NEXT:    movl $65535, %ecx # imm = 0xFFFF
+; X64-NEXT:    cmovll %ecx, %edi
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpb %al, %dl
+; X64-NEXT:    setg %sil
+; X64-NEXT:    cmovll %ecx, %esi
+; X64-NEXT:    movd %esi, %xmm0
+; X64-NEXT:    pinsrw $1, %edi, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: scmp_ret_wider_than_operands:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    setg %ch
+; X86-NEXT:    movl $65535, %edx # imm = 0xFFFF
+; X86-NEXT:    movl $65535, %eax # imm = 0xFFFF
+; X86-NEXT:    jl .LBB21_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %ch, %bl
+; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:  .LBB21_2:
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    setg %cl
+; X86-NEXT:    jl .LBB21_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:  .LBB21_4:
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    # kill: def $dx killed $dx killed $edx
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+  %1 = call <2 x i16> @llvm.scmp(<2 x i8> %x, <2 x i8> %y)
+  ret <2 x i16> %1
+}
+
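For reference, the recurring scalar pattern in the checks above (a setg/setl to materialize the "greater" bit, then a cmov of -1 on "less") is the generic select-based expansion of the 3-way comparison: the result is -1, 0, or 1 according to whether the first operand is less than, equal to, or greater than the second. A minimal IR sketch of that shape (the function name is illustrative, not part of the patch):

  define i8 @scmp_sketch(i32 %x, i32 %y) {
    %gt = icmp sgt i32 %x, %y             ; x > y
    %lt = icmp slt i32 %x, %y             ; x < y
    %gt.ext = zext i1 %gt to i8           ; 1 if greater, else 0
    %r = select i1 %lt, i8 -1, i8 %gt.ext ; -1 if less
    ret i8 %r
  }

The unsigned tests in ucmp.ll below follow the same shape with unsigned predicates (icmp ugt/ult in place of sgt/slt), which is why the generated code there uses seta/cmovael instead of setg/cmovll.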

diff  --git a/llvm/test/CodeGen/X86/ucmp.ll b/llvm/test/CodeGen/X86/ucmp.ll
new file mode 100644
index 0000000000000..344404749d7ef
--- /dev/null
+++ b/llvm/test/CodeGen/X86/ucmp.ll
@@ -0,0 +1,2498 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+
+define i8 @ucmp.8.8(i8 %x, i8 %y) nounwind {
+; X64-LABEL: ucmp.8.8:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpb %sil, %dil
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovael %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp.8.8:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB0_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB0_2:
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
+  ret i8 %1
+}
+
+define i8 @ucmp.8.16(i16 %x, i16 %y) nounwind {
+; X64-LABEL: ucmp.8.16:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpw %si, %di
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovael %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp.8.16:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpw {{[0-9]+}}(%esp), %ax
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB1_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB1_2:
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
+  ret i8 %1
+}
+
+define i8 @ucmp.8.32(i32 %x, i32 %y) nounwind {
+; X64-LABEL: ucmp.8.32:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovael %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp.8.32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB2_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB2_2:
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  ret i8 %1
+}
+
+define i8 @ucmp.8.64(i64 %x, i64 %y) nounwind {
+; X64-LABEL: ucmp.8.64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovael %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp.8.64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %eax, %esi
+; X86-NEXT:    movl %edi, %ecx
+; X86-NEXT:    sbbl %edx, %ecx
+; X86-NEXT:    setb %cl
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB3_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB3_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.ucmp(i64 %x, i64 %y)
+  ret i8 %1
+}
+
+define i8 @ucmp.8.128(i128 %x, i128 %y) nounwind {
+; X64-LABEL: ucmp.8.128:
+; X64:       # %bb.0:
+; X64-NEXT:    cmpq %rdi, %rdx
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    sbbq %rsi, %rax
+; X64-NEXT:    setb %al
+; X64-NEXT:    movzbl %al, %r8d
+; X64-NEXT:    cmpq %rdx, %rdi
+; X64-NEXT:    sbbq %rcx, %rsi
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovael %r8d, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp.8.128:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %ebp, %ebx
+; X86-NEXT:    sbbl %edx, %ebx
+; X86-NEXT:    movl %ecx, %ebx
+; X86-NEXT:    sbbl %eax, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, %ecx
+; X86-NEXT:    sbbl %ebx, %ecx
+; X86-NEXT:    setb %cl
+; X86-NEXT:    cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    sbbl %ebp, %edx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %esi, %ebx
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB4_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB4_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.ucmp(i128 %x, i128 %y)
+  ret i8 %1
+}
+
+define i32 @ucmp.32.32(i32 %x, i32 %y) nounwind {
+; X64-LABEL: ucmp.32.32:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovael %ecx, %eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp.32.32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %dl
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    jb .LBB5_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %dl, %cl
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB5_2:
+; X86-NEXT:    retl
+  %1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
+  ret i32 %1
+}
+
+define i32 @ucmp.32.64(i64 %x, i64 %y) nounwind {
+; X64-LABEL: ucmp.32.64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovael %ecx, %eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp.32.64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %eax, %esi
+; X86-NEXT:    movl %edi, %ecx
+; X86-NEXT:    sbbl %edx, %ecx
+; X86-NEXT:    setb %cl
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    jb .LBB6_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movzbl %cl, %eax
+; X86-NEXT:  .LBB6_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+  %1 = call i32 @llvm.ucmp(i64 %x, i64 %y)
+  ret i32 %1
+}
+
+define i64 @ucmp.64.64(i64 %x, i64 %y) nounwind {
+; X64-LABEL: ucmp.64.64:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movq $-1, %rax
+; X64-NEXT:    cmovaeq %rcx, %rax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp.64.64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %eax, %esi
+; X86-NEXT:    movl %edi, %ecx
+; X86-NEXT:    sbbl %edx, %ecx
+; X86-NEXT:    setb %cl
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    jb .LBB7_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movzbl %cl, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:  .LBB7_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+  %1 = call i64 @llvm.ucmp(i64 %x, i64 %y)
+  ret i64 %1
+}
+
+define i4 @ucmp_narrow_result(i32 %x, i32 %y) nounwind {
+; X64-LABEL: ucmp_narrow_result:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovael %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_narrow_result:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB8_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB8_2:
+; X86-NEXT:    retl
+  %1 = call i4 @llvm.ucmp(i32 %x, i32 %y)
+  ret i4 %1
+}
+
+define i8 @ucmp_narrow_op(i62 %x, i62 %y) nounwind {
+; X64-LABEL: ucmp_narrow_op:
+; X64:       # %bb.0:
+; X64-NEXT:    movabsq $4611686018427387903, %rax # imm = 0x3FFFFFFFFFFFFFFF
+; X64-NEXT:    andq %rax, %rsi
+; X64-NEXT:    andq %rax, %rdi
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovael %ecx, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_narrow_op:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl $1073741823, %eax # imm = 0x3FFFFFFF
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    andl %eax, %edx
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %esi, %edi
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    sbbl %edx, %ecx
+; X86-NEXT:    setb %cl
+; X86-NEXT:    cmpl %edi, %esi
+; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB9_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB9_2:
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.ucmp(i62 %x, i62 %y)
+  ret i8 %1
+}
+
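
A note on the narrow-operand case above (hand-written, not generated by the patch): the result of an unsigned 3-way compare depends only on the low 62 bits, so the i62 operands are first zero-extended to the legal register width, which is what the andq/andl masking against 0x3FFFFFFFFFFFFFFF in the checks implements. Reduced to IR, with illustrative names:

define i8 @ucmp_narrow_op_sketch(i62 %x, i62 %y) {
  ; the zero-extensions model the mask-before-compare in the checks above
  %xw = zext i62 %x to i64
  %yw = zext i62 %y to i64
  %gt = icmp ugt i64 %xw, %yw
  %lt = icmp ult i64 %xw, %yw
  %gt.ext = zext i1 %gt to i8
  %lt.ext = zext i1 %lt to i8
  %res = sub i8 %gt.ext, %lt.ext
  ret i8 %res
}

The signed variant would promote via sign-extension instead, so masking with an and would not suffice there.
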
+define i141 @ucmp_wide_result(i32 %x, i32 %y) nounwind {
+; X64-LABEL: ucmp_wide_result:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movq $-1, %rax
+; X64-NEXT:    cmovaeq %rcx, %rax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_wide_result:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    seta %bl
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jb .LBB10_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %bl, %dl
+; X86-NEXT:    movl %edx, %esi
+; X86-NEXT:  .LBB10_2:
+; X86-NEXT:    sbbl %ecx, %ecx
+; X86-NEXT:    movl %ecx, 4(%eax)
+; X86-NEXT:    movl %esi, (%eax)
+; X86-NEXT:    movl $0, 12(%eax)
+; X86-NEXT:    movl $0, 8(%eax)
+; X86-NEXT:    movw $0, 16(%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl $4
+  %1 = call i141 @llvm.ucmp(i32 %x, i32 %y)
+  ret i141 %1
+}
+
+define i8 @ucmp_wide_op(i109 %x, i109 %y) nounwind {
+; X64-LABEL: ucmp_wide_op:
+; X64:       # %bb.0:
+; X64-NEXT:    movabsq $35184372088831, %rax # imm = 0x1FFFFFFFFFFF
+; X64-NEXT:    andq %rax, %rsi
+; X64-NEXT:    andq %rax, %rcx
+; X64-NEXT:    cmpq %rdi, %rdx
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    sbbq %rsi, %rax
+; X64-NEXT:    setb %al
+; X64-NEXT:    movzbl %al, %r8d
+; X64-NEXT:    cmpq %rdx, %rdi
+; X64-NEXT:    sbbq %rcx, %rsi
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovael %r8d, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_wide_op:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    movl $8191, %eax # imm = 0x1FFF
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    andl %eax, %ecx
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    sbbl %edx, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %edi, %esi
+; X86-NEXT:    sbbl %ebx, %esi
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    sbbl %ecx, %esi
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %ebp, {{[0-9]+}}(%esp)
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    sbbl %edi, %ebx
+; X86-NEXT:    sbbl %eax, %ecx
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB11_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X86-NEXT:  .LBB11_2:
+; X86-NEXT:    addl $4, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
+  %1 = call i8 @llvm.ucmp(i109 %x, i109 %y)
+  ret i8 %1
+}
+
+define i41 @ucmp_uncommon_types(i7 %x, i7 %y) nounwind {
+; X64-LABEL: ucmp_uncommon_types:
+; X64:       # %bb.0:
+; X64-NEXT:    andb $127, %sil
+; X64-NEXT:    andb $127, %dil
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpb %sil, %dil
+; X64-NEXT:    seta %cl
+; X64-NEXT:    movq $-1, %rax
+; X64-NEXT:    cmovaeq %rcx, %rax
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_uncommon_types:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andb $127, %al
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %ah
+; X86-NEXT:    andb $127, %ah
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    cmpb %al, %ah
+; X86-NEXT:    seta %bl
+; X86-NEXT:    movl $-1, %eax
+; X86-NEXT:    jb .LBB12_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %bl, %cl
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:  .LBB12_2:
+; X86-NEXT:    sbbl %edx, %edx
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl
+  %1 = call i41 @llvm.ucmp(i7 %x, i7 %y)
+  ret i41 %1
+}
+
+define <4 x i32> @ucmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind {
+; X64-LABEL: ucmp_normal_vectors:
+; X64:       # %bb.0:
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; X64-NEXT:    movd %xmm2, %eax
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; X64-NEXT:    movd %xmm2, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    seta %dl
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovbl %eax, %edx
+; X64-NEXT:    movd %edx, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; X64-NEXT:    movd %xmm3, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
+; X64-NEXT:    movd %xmm3, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm3
+; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm0
+; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X64-NEXT:    movdqa %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_normal_vectors:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    jb .LBB13_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:  .LBB13_2:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jb .LBB13_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:  .LBB13_4:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    jb .LBB13_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:  .LBB13_6:
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    seta %cl
+; X86-NEXT:    jb .LBB13_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:  .LBB13_8:
+; X86-NEXT:    movl %edx, 12(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %ebp, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+  %1 = call <4 x i32> @llvm.ucmp(<4 x i32> %x, <4 x i32> %y)
+  ret <4 x i32> %1
+}
+
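
The <4 x i32> checks above show full scalarization: each lane is extracted to a GPR, compared with the same cmp/seta/cmov (or branch) pattern, and the results are repacked. Per lane the computation is the same 1/0/-1 as in the scalar case; a hand-written vector IR equivalence (illustration only, not what this lowering emits) is:

define <4 x i32> @ucmp_vec_sketch(<4 x i32> %x, <4 x i32> %y) {
  ; per-lane unsigned greater-than and less-than masks
  %gt = icmp ugt <4 x i32> %x, %y
  %lt = icmp ult <4 x i32> %x, %y
  %gt.ext = zext <4 x i1> %gt to <4 x i32>
  %lt.ext = zext <4 x i1> %lt to <4 x i32>
  ; 1/0/-1 in each lane
  %res = sub <4 x i32> %gt.ext, %lt.ext
  ret <4 x i32> %res
}

In this form the per-lane compares could plausibly stay in vector registers (e.g. pcmpgtd after biasing both operands by the sign bit to emulate an unsigned compare on SSE2) rather than doing one GPR compare per element.
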
+define <4 x i8> @ucmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind {
+; X64-LABEL: ucmp_narrow_vec_result:
+; X64:       # %bb.0:
+; X64-NEXT:    movd %xmm1, %eax
+; X64-NEXT:    movd %xmm0, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    seta %dl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovbl %eax, %edx
+; X64-NEXT:    movzbl %dl, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
+; X64-NEXT:    movd %xmm2, %edx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; X64-NEXT:    movd %xmm2, %esi
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpl %edx, %esi
+; X64-NEXT:    seta %dil
+; X64-NEXT:    cmovbl %eax, %edi
+; X64-NEXT:    movzbl %dil, %edx
+; X64-NEXT:    shll $8, %edx
+; X64-NEXT:    orl %ecx, %edx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; X64-NEXT:    movd %xmm2, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; X64-NEXT:    movd %xmm2, %esi
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpl %ecx, %esi
+; X64-NEXT:    seta %dil
+; X64-NEXT:    cmovbl %eax, %edi
+; X64-NEXT:    movzbl %dil, %ecx
+; X64-NEXT:    shll $16, %ecx
+; X64-NEXT:    orl %edx, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X64-NEXT:    movd %xmm0, %esi
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpl %edx, %esi
+; X64-NEXT:    seta %dil
+; X64-NEXT:    cmovbl %eax, %edi
+; X64-NEXT:    shll $24, %edi
+; X64-NEXT:    orl %ecx, %edi
+; X64-NEXT:    movd %edi, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_narrow_vec_result:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    seta %ch
+; X86-NEXT:    movb $-1, %dl
+; X86-NEXT:    movb $-1, %cl
+; X86-NEXT:    jb .LBB14_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %ch, %cl
+; X86-NEXT:  .LBB14_2:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    movb $-1, %ch
+; X86-NEXT:    jb .LBB14_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %al, %ch
+; X86-NEXT:  .LBB14_4:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    seta %bl
+; X86-NEXT:    movb $-1, %dh
+; X86-NEXT:    jb .LBB14_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %bl, %dh
+; X86-NEXT:  .LBB14_6:
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    seta %bl
+; X86-NEXT:    jb .LBB14_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %bl, %dl
+; X86-NEXT:  .LBB14_8:
+; X86-NEXT:    movb %dl, 3(%eax)
+; X86-NEXT:    movb %dh, 2(%eax)
+; X86-NEXT:    movb %ch, 1(%eax)
+; X86-NEXT:    movb %cl, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl $4
+  %1 = call <4 x i8> @llvm.ucmp(<4 x i32> %x, <4 x i32> %y)
+  ret <4 x i8> %1
+}
+
+define <4 x i32> @ucmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind {
+; X64-LABEL: ucmp_narrow_vec_op:
+; X64:       # %bb.0:
+; X64-NEXT:    pxor %xmm2, %xmm2
+; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; X64-NEXT:    pextrw $0, %xmm1, %ecx
+; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
+; X64-NEXT:    movd %xmm3, %eax
+; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; X64-NEXT:    pextrw $0, %xmm0, %edx
+; X64-NEXT:    movdqa %xmm0, %xmm3
+; X64-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[3,3,3,3]
+; X64-NEXT:    movd %xmm0, %esi
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpl %eax, %esi
+; X64-NEXT:    seta %dil
+; X64-NEXT:    movl $-1, %eax
+; X64-NEXT:    cmovbl %eax, %edi
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; X64-NEXT:    movd %xmm0, %esi
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
+; X64-NEXT:    movd %xmm0, %r8d
+; X64-NEXT:    xorl %r9d, %r9d
+; X64-NEXT:    cmpl %esi, %r8d
+; X64-NEXT:    movd %edi, %xmm0
+; X64-NEXT:    seta %r9b
+; X64-NEXT:    cmovbl %eax, %r9d
+; X64-NEXT:    movd %r9d, %xmm2
+; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm0
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm1
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_narrow_vec_op:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    jb .LBB15_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:  .LBB15_2:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    seta %bl
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jb .LBB15_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %bl, %al
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:  .LBB15_4:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    jb .LBB15_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:  .LBB15_6:
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    seta %cl
+; X86-NEXT:    jb .LBB15_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:  .LBB15_8:
+; X86-NEXT:    movl %edx, 12(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %ebp, (%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+  %1 = call <4 x i32> @llvm.ucmp(<4 x i8> %x, <4 x i8> %y)
+  ret <4 x i32> %1
+}
+
+define <16 x i32> @ucmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind {
+; X64-LABEL: ucmp_wide_vec_result:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbp
+; X64-NEXT:    pushq %r15
+; X64-NEXT:    pushq %r14
+; X64-NEXT:    pushq %r13
+; X64-NEXT:    pushq %r12
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    pxor %xmm2, %xmm2
+; X64-NEXT:    movdqa %xmm1, %xmm4
+; X64-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; X64-NEXT:    pextrw $0, %xmm4, %edi
+; X64-NEXT:    movdqa %xmm4, %xmm3
+; X64-NEXT:    pextrw $4, %xmm4, %r11d
+; X64-NEXT:    movdqa %xmm4, %xmm5
+; X64-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[3,3,3,3]
+; X64-NEXT:    movd %xmm4, %eax
+; X64-NEXT:    movdqa %xmm0, %xmm6
+; X64-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; X64-NEXT:    pextrw $0, %xmm6, %r8d
+; X64-NEXT:    movdqa %xmm6, %xmm4
+; X64-NEXT:    pextrw $4, %xmm6, %ebx
+; X64-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[3,3,3,3]
+; X64-NEXT:    movd %xmm7, %ecx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    movl $-1, %edx
+; X64-NEXT:    cmovbl %edx, %esi
+; X64-NEXT:    movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %esi
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %r9d
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpl %esi, %r9d
+; X64-NEXT:    seta %al
+; X64-NEXT:    cmovbl %edx, %eax
+; X64-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %edi, %r8d
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %edx, %esi
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
+; X64-NEXT:    movd %xmm5, %r8d
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm6[1,1,1,1]
+; X64-NEXT:    movd %xmm5, %r9d
+; X64-NEXT:    xorl %edi, %edi
+; X64-NEXT:    cmpl %r8d, %r9d
+; X64-NEXT:    seta %dil
+; X64-NEXT:    cmovbl %edx, %edi
+; X64-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[3,3,3,3]
+; X64-NEXT:    movd %xmm5, %r9d
+; X64-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[3,3,3,3]
+; X64-NEXT:    movd %xmm5, %r10d
+; X64-NEXT:    xorl %r8d, %r8d
+; X64-NEXT:    cmpl %r9d, %r10d
+; X64-NEXT:    seta %r8b
+; X64-NEXT:    cmovbl %edx, %r8d
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[2,3,2,3]
+; X64-NEXT:    movd %xmm5, %r10d
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
+; X64-NEXT:    movd %xmm5, %ebp
+; X64-NEXT:    xorl %r9d, %r9d
+; X64-NEXT:    cmpl %r10d, %ebp
+; X64-NEXT:    seta %r9b
+; X64-NEXT:    cmovbl %edx, %r9d
+; X64-NEXT:    xorl %r10d, %r10d
+; X64-NEXT:    cmpl %r11d, %ebx
+; X64-NEXT:    seta %r10b
+; X64-NEXT:    cmovbl %edx, %r10d
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; X64-NEXT:    movd %xmm3, %ebx
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,1,1]
+; X64-NEXT:    movd %xmm3, %ebp
+; X64-NEXT:    xorl %r11d, %r11d
+; X64-NEXT:    cmpl %ebx, %ebp
+; X64-NEXT:    seta %r11b
+; X64-NEXT:    cmovbl %edx, %r11d
+; X64-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; X64-NEXT:    pextrw $0, %xmm1, %r15d
+; X64-NEXT:    movdqa %xmm1, %xmm4
+; X64-NEXT:    movdqa %xmm1, %xmm3
+; X64-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[3,3,3,3]
+; X64-NEXT:    movd %xmm5, %ebp
+; X64-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; X64-NEXT:    pextrw $0, %xmm0, %r12d
+; X64-NEXT:    movdqa %xmm0, %xmm5
+; X64-NEXT:    movdqa %xmm0, %xmm6
+; X64-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[3,3,3,3]
+; X64-NEXT:    movd %xmm7, %r14d
+; X64-NEXT:    xorl %ebx, %ebx
+; X64-NEXT:    cmpl %ebp, %r14d
+; X64-NEXT:    seta %bl
+; X64-NEXT:    cmovbl %edx, %ebx
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm3[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %r14d
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %r13d
+; X64-NEXT:    xorl %ebp, %ebp
+; X64-NEXT:    cmpl %r14d, %r13d
+; X64-NEXT:    seta %bpl
+; X64-NEXT:    cmovbl %edx, %ebp
+; X64-NEXT:    xorl %r14d, %r14d
+; X64-NEXT:    cmpl %r15d, %r12d
+; X64-NEXT:    seta %r14b
+; X64-NEXT:    cmovbl %edx, %r14d
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; X64-NEXT:    movd %xmm3, %r12d
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[1,1,1,1]
+; X64-NEXT:    movd %xmm3, %r13d
+; X64-NEXT:    xorl %r15d, %r15d
+; X64-NEXT:    cmpl %r12d, %r13d
+; X64-NEXT:    seta %r15b
+; X64-NEXT:    cmovbl %edx, %r15d
+; X64-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[3,3,3,3]
+; X64-NEXT:    movd %xmm3, %r13d
+; X64-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[3,3,3,3]
+; X64-NEXT:    movd %xmm2, %eax
+; X64-NEXT:    xorl %r12d, %r12d
+; X64-NEXT:    cmpl %r13d, %eax
+; X64-NEXT:    seta %r12b
+; X64-NEXT:    cmovbl %edx, %r12d
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
+; X64-NEXT:    movd %xmm2, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
+; X64-NEXT:    movd %xmm2, %eax
+; X64-NEXT:    xorl %r13d, %r13d
+; X64-NEXT:    cmpl %ecx, %eax
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Folded Reload
+; X64-NEXT:    # xmm2 = mem[0],zero,zero,zero
+; X64-NEXT:    pextrw $4, %xmm1, %eax
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 4-byte Folded Reload
+; X64-NEXT:    # xmm3 = mem[0],zero,zero,zero
+; X64-NEXT:    pextrw $4, %xmm0, %ecx
+; X64-NEXT:    movd %esi, %xmm0
+; X64-NEXT:    movd %edi, %xmm6
+; X64-NEXT:    movd %r8d, %xmm7
+; X64-NEXT:    movd %r9d, %xmm8
+; X64-NEXT:    movd %r10d, %xmm1
+; X64-NEXT:    movd %r11d, %xmm9
+; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-NEXT:    movd %ebx, %xmm10
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
+; X64-NEXT:    movd %ebp, %xmm6
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; X64-NEXT:    movd %r14d, %xmm2
+; X64-NEXT:    punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
+; X64-NEXT:    movd %r15d, %xmm3
+; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
+; X64-NEXT:    movd %r12d, %xmm7
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm8[0]
+; X64-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm10[0],xmm6[1],xmm10[1]
+; X64-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
+; X64-NEXT:    seta %r13b
+; X64-NEXT:    cmovbl %edx, %r13d
+; X64-NEXT:    movd %r13d, %xmm6
+; X64-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %edx, %esi
+; X64-NEXT:    movd %esi, %xmm3
+; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
+; X64-NEXT:    movd %xmm4, %eax
+; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,1,1]
+; X64-NEXT:    movd %xmm4, %ecx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %edx, %esi
+; X64-NEXT:    movd %esi, %xmm4
+; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm6[0]
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    popq %r12
+; X64-NEXT:    popq %r13
+; X64-NEXT:    popq %r14
+; X64-NEXT:    popq %r15
+; X64-NEXT:    popq %rbp
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_wide_vec_result:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $48, %esp
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %dl
+; X86-NEXT:    seta %dl
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jb .LBB16_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movb %dl, %cl
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:  .LBB16_2:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    jb .LBB16_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, %edi
+; X86-NEXT:  .LBB16_4:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    jb .LBB16_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:  .LBB16_6:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    jb .LBB16_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, %ebp
+; X86-NEXT:  .LBB16_8:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jb .LBB16_10
+; X86-NEXT:  # %bb.9:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_10:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jb .LBB16_12
+; X86-NEXT:  # %bb.11:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_12:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jb .LBB16_14
+; X86-NEXT:  # %bb.13:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_14:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jb .LBB16_16
+; X86-NEXT:  # %bb.15:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_16:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movl $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    jb .LBB16_18
+; X86-NEXT:  # %bb.17:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_18:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, (%esp) # 4-byte Folded Spill
+; X86-NEXT:    jb .LBB16_20
+; X86-NEXT:  # %bb.19:
+; X86-NEXT:    movb %al, %dl
+; X86-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X86-NEXT:  .LBB16_20:
+; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movl $-1, %ebx
+; X86-NEXT:    jb .LBB16_22
+; X86-NEXT:  # %bb.21:
+; X86-NEXT:    movb %cl, %dl
+; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:  .LBB16_22:
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, %ebp
+; X86-NEXT:    jb .LBB16_24
+; X86-NEXT:  # %bb.23:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %ebp
+; X86-NEXT:  .LBB16_24:
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %dl
+; X86-NEXT:    seta %ah
+; X86-NEXT:    movl $-1, %edx
+; X86-NEXT:    jb .LBB16_26
+; X86-NEXT:  # %bb.25:
+; X86-NEXT:    movb %ah, %bl
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:  .LBB16_26:
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    seta %al
+; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    jb .LBB16_28
+; X86-NEXT:  # %bb.27:
+; X86-NEXT:    movb %al, %bl
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:  .LBB16_28:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movl $-1, %edi
+; X86-NEXT:    jb .LBB16_30
+; X86-NEXT:  # %bb.29:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:  .LBB16_30:
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    cmpb {{[0-9]+}}(%esp), %ch
+; X86-NEXT:    seta %cl
+; X86-NEXT:    jb .LBB16_32
+; X86-NEXT:  # %bb.31:
+; X86-NEXT:    movb %cl, %bl
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:  .LBB16_32:
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 60(%eax)
+; X86-NEXT:    movl %edi, 56(%eax)
+; X86-NEXT:    movl %esi, 52(%eax)
+; X86-NEXT:    movl %edx, 48(%eax)
+; X86-NEXT:    movl %ebp, 44(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 40(%eax)
+; X86-NEXT:    movl (%esp), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 36(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 32(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 28(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 24(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 20(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 16(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 12(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 8(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, 4(%eax)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, (%eax)
+; X86-NEXT:    addl $48, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+  %1 = call <16 x i32> @llvm.ucmp(<16 x i8> %x, <16 x i8> %y)
+  ret <16 x i32> %1
+}
+
+define <16 x i8> @ucmp_wide_vec_op(<16 x i32> %x, <16 x i32> %y) nounwind {
+; X64-LABEL: ucmp_wide_vec_op:
+; X64:       # %bb.0:
+; X64-NEXT:    pshufd {{.*#+}} xmm8 = xmm7[3,3,3,3]
+; X64-NEXT:    movd %xmm8, %eax
+; X64-NEXT:    pshufd {{.*#+}} xmm8 = xmm3[3,3,3,3]
+; X64-NEXT:    movd %xmm8, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    seta %dl
+; X64-NEXT:    movl $255, %eax
+; X64-NEXT:    cmovbl %eax, %edx
+; X64-NEXT:    movd %edx, %xmm8
+; X64-NEXT:    pshufd {{.*#+}} xmm9 = xmm7[2,3,2,3]
+; X64-NEXT:    movd %xmm9, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm9 = xmm3[2,3,2,3]
+; X64-NEXT:    movd %xmm9, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm9
+; X64-NEXT:    punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; X64-NEXT:    movd %xmm7, %ecx
+; X64-NEXT:    movd %xmm3, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm8
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm7[1,1,1,1]
+; X64-NEXT:    movd %xmm7, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; X64-NEXT:    movd %xmm3, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm3
+; X64-NEXT:    punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3],xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm6[3,3,3,3]
+; X64-NEXT:    movd %xmm3, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[3,3,3,3]
+; X64-NEXT:    movd %xmm3, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm3
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[2,3,2,3]
+; X64-NEXT:    movd %xmm7, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm7
+; X64-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
+; X64-NEXT:    movd %xmm6, %ecx
+; X64-NEXT:    movd %xmm2, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm3
+; X64-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,1,1]
+; X64-NEXT:    movd %xmm6, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
+; X64-NEXT:    movd %xmm2, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm2
+; X64-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; X64-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[3,3,3,3]
+; X64-NEXT:    movd %xmm2, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; X64-NEXT:    movd %xmm2, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
+; X64-NEXT:    movd %xmm6, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
+; X64-NEXT:    movd %xmm6, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm6
+; X64-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm1
+; X64-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3]
+; X64-NEXT:    movd %xmm1, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
+; X64-NEXT:    movd %xmm1, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
+; X64-NEXT:    movd %xmm5, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
+; X64-NEXT:    movd %xmm5, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm5
+; X64-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; X64-NEXT:    movd %xmm4, %ecx
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm1
+; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
+; X64-NEXT:    movd %xmm4, %ecx
+; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-NEXT:    movd %xmm0, %edx
+; X64-NEXT:    xorl %esi, %esi
+; X64-NEXT:    cmpl %ecx, %edx
+; X64-NEXT:    seta %sil
+; X64-NEXT:    cmovbl %eax, %esi
+; X64-NEXT:    movd %esi, %xmm0
+; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; X64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_wide_vec_op:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    movb $-1, %dl
+; X86-NEXT:    jb .LBB17_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:  .LBB17_2:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    movb $-1, %ah
+; X86-NEXT:    jb .LBB17_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:  .LBB17_4:
+; X86-NEXT:    movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, %ch
+; X86-NEXT:    jb .LBB17_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movb %cl, %ch
+; X86-NEXT:  .LBB17_6:
+; X86-NEXT:    movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    movb $-1, %ah
+; X86-NEXT:    jb .LBB17_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:  .LBB17_8:
+; X86-NEXT:    movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, %ch
+; X86-NEXT:    jb .LBB17_10
+; X86-NEXT:  # %bb.9:
+; X86-NEXT:    movb %cl, %ch
+; X86-NEXT:  .LBB17_10:
+; X86-NEXT:    movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    movb $-1, %ah
+; X86-NEXT:    jb .LBB17_12
+; X86-NEXT:  # %bb.11:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:  .LBB17_12:
+; X86-NEXT:    movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, %ch
+; X86-NEXT:    jb .LBB17_14
+; X86-NEXT:  # %bb.13:
+; X86-NEXT:    movb %cl, %ch
+; X86-NEXT:  .LBB17_14:
+; X86-NEXT:    movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    movb $-1, %ah
+; X86-NEXT:    jb .LBB17_16
+; X86-NEXT:  # %bb.15:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:  .LBB17_16:
+; X86-NEXT:    movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, %ch
+; X86-NEXT:    jb .LBB17_18
+; X86-NEXT:  # %bb.17:
+; X86-NEXT:    movb %cl, %ch
+; X86-NEXT:  .LBB17_18:
+; X86-NEXT:    movb %ch, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    movb $-1, %ah
+; X86-NEXT:    jb .LBB17_20
+; X86-NEXT:  # %bb.19:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:  .LBB17_20:
+; X86-NEXT:    movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    seta %cl
+; X86-NEXT:    movb $-1, %bh
+; X86-NEXT:    jb .LBB17_22
+; X86-NEXT:  # %bb.21:
+; X86-NEXT:    movb %cl, %bh
+; X86-NEXT:  .LBB17_22:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    movb $-1, %cl
+; X86-NEXT:    jb .LBB17_24
+; X86-NEXT:  # %bb.23:
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:  .LBB17_24:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    seta %ch
+; X86-NEXT:    movb $-1, %dl
+; X86-NEXT:    jb .LBB17_26
+; X86-NEXT:  # %bb.25:
+; X86-NEXT:    movb %ch, %dl
+; X86-NEXT:  .LBB17_26:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    seta %al
+; X86-NEXT:    movb $-1, %ch
+; X86-NEXT:    jb .LBB17_28
+; X86-NEXT:  # %bb.27:
+; X86-NEXT:    movb %al, %ch
+; X86-NEXT:  .LBB17_28:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    seta %bl
+; X86-NEXT:    movb $-1, %dh
+; X86-NEXT:    jb .LBB17_30
+; X86-NEXT:  # %bb.29:
+; X86-NEXT:    movb %bl, %dh
+; X86-NEXT:  .LBB17_30:
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    seta %bl
+; X86-NEXT:    jb .LBB17_32
+; X86-NEXT:  # %bb.31:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB17_32:
+; X86-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %bl # 1-byte Reload
+; X86-NEXT:    movb %bl, 15(%eax)
+; X86-NEXT:    movb %dh, 14(%eax)
+; X86-NEXT:    movb %ch, 13(%eax)
+; X86-NEXT:    movb %dl, 12(%eax)
+; X86-NEXT:    movb %cl, 11(%eax)
+; X86-NEXT:    movb %bh, 10(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 9(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 8(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 7(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 6(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 5(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 4(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 3(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 2(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, 1(%eax)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, (%eax)
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    retl $4
+  %1 = call <16 x i8> @llvm.ucmp(<16 x i32> %x, <16 x i32> %y)
+  ret <16 x i8> %1
+}
+
+define <17 x i2> @ucmp_uncommon_vectors(<17 x i71> %x, <17 x i71> %y) nounwind {
+; X64-LABEL: ucmp_uncommon_vectors:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbp
+; X64-NEXT:    pushq %r15
+; X64-NEXT:    pushq %r14
+; X64-NEXT:    pushq %r13
+; X64-NEXT:    pushq %r12
+; X64-NEXT:    pushq %rbx
+; X64-NEXT:    subq $120, %rsp
+; X64-NEXT:    movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    andl $127, %edx
+; X64-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    andl $127, %r8d
+; X64-NEXT:    movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, (%rsp) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r13
+; X64-NEXT:    andl $127, %r13d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r15
+; X64-NEXT:    andl $127, %r15d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    andl $127, %eax
+; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rbx
+; X64-NEXT:    andl $127, %ebx
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r12
+; X64-NEXT:    andl $127, %r12d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rbp
+; X64-NEXT:    andl $127, %ebp
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; X64-NEXT:    andl $127, %r11d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r8
+; X64-NEXT:    andl $127, %r8d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; X64-NEXT:    andl $127, %r10d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; X64-NEXT:    andl $127, %edx
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rsi
+; X64-NEXT:    andl $127, %esi
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r14
+; X64-NEXT:    andl $127, %r14d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    andl $127, %ecx
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r9
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
+; X64-NEXT:    cmpq %r9, %rdi
+; X64-NEXT:    movq %rcx, %rax
+; X64-NEXT:    sbbq %r14, %rax
+; X64-NEXT:    setb %al
+; X64-NEXT:    cmpq %rdi, %r9
+; X64-NEXT:    sbbq %rcx, %r14
+; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    movl $255, %r14d
+; X64-NEXT:    cmovbl %r14d, %eax
+; X64-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    cmpq %rax, %rcx
+; X64-NEXT:    movq %rsi, %rdi
+; X64-NEXT:    sbbq %rdx, %rdi
+; X64-NEXT:    setb %dil
+; X64-NEXT:    cmpq %rcx, %rax
+; X64-NEXT:    sbbq %rsi, %rdx
+; X64-NEXT:    movzbl %dil, %eax
+; X64-NEXT:    cmovbl %r14d, %eax
+; X64-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    cmpq %rax, %rcx
+; X64-NEXT:    movq %r10, %rdx
+; X64-NEXT:    sbbq %r8, %rdx
+; X64-NEXT:    setb %dl
+; X64-NEXT:    cmpq %rcx, %rax
+; X64-NEXT:    sbbq %r10, %r8
+; X64-NEXT:    movzbl %dl, %eax
+; X64-NEXT:    cmovbl %r14d, %eax
+; X64-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    cmpq %rax, %rcx
+; X64-NEXT:    movq %r11, %rdx
+; X64-NEXT:    sbbq %rbp, %rdx
+; X64-NEXT:    setb %dl
+; X64-NEXT:    cmpq %rcx, %rax
+; X64-NEXT:    sbbq %r11, %rbp
+; X64-NEXT:    movzbl %dl, %eax
+; X64-NEXT:    cmovbl %r14d, %eax
+; X64-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    cmpq %rax, %rcx
+; X64-NEXT:    movq %r12, %rdx
+; X64-NEXT:    sbbq %rbx, %rdx
+; X64-NEXT:    setb %dl
+; X64-NEXT:    cmpq %rcx, %rax
+; X64-NEXT:    sbbq %r12, %rbx
+; X64-NEXT:    movzbl %dl, %eax
+; X64-NEXT:    cmovbl %r14d, %eax
+; X64-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    cmpq %rax, %rcx
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; X64-NEXT:    movq %rsi, %rdx
+; X64-NEXT:    sbbq %r15, %rdx
+; X64-NEXT:    setb %dl
+; X64-NEXT:    cmpq %rcx, %rax
+; X64-NEXT:    sbbq %rsi, %r15
+; X64-NEXT:    movzbl %dl, %eax
+; X64-NEXT:    cmovbl %r14d, %eax
+; X64-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    cmpq %rax, %rcx
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; X64-NEXT:    movq %rsi, %rdx
+; X64-NEXT:    sbbq %r13, %rdx
+; X64-NEXT:    setb %dl
+; X64-NEXT:    cmpq %rcx, %rax
+; X64-NEXT:    sbbq %rsi, %r13
+; X64-NEXT:    movzbl %dl, %eax
+; X64-NEXT:    cmovbl %r14d, %eax
+; X64-NEXT:    movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; X64-NEXT:    cmpq %rcx, %rdx
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; X64-NEXT:    movq %rdi, %rsi
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT:    sbbq %rax, %rsi
+; X64-NEXT:    setb %sil
+; X64-NEXT:    cmpq %rdx, %rcx
+; X64-NEXT:    sbbq %rdi, %rax
+; X64-NEXT:    movzbl %sil, %ebp
+; X64-NEXT:    cmovbl %r14d, %ebp
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rsi
+; X64-NEXT:    cmpq %rdx, %rsi
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT:    movq %rcx, %rdi
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT:    sbbq %rax, %rdi
+; X64-NEXT:    setb %dil
+; X64-NEXT:    cmpq %rsi, %rdx
+; X64-NEXT:    sbbq %rcx, %rax
+; X64-NEXT:    movzbl %dil, %ebx
+; X64-NEXT:    cmovbl %r14d, %ebx
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rsi
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT:    movq %rcx, %r8
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT:    sbbq %rax, %r8
+; X64-NEXT:    setb %r8b
+; X64-NEXT:    cmpq %rdi, %rsi
+; X64-NEXT:    sbbq %rcx, %rax
+; X64-NEXT:    movzbl %r8b, %r10d
+; X64-NEXT:    cmovbl %r14d, %r10d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r8
+; X64-NEXT:    cmpq %rdi, %r8
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT:    movq %rcx, %r9
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT:    sbbq %rax, %r9
+; X64-NEXT:    setb %r9b
+; X64-NEXT:    cmpq %r8, %rdi
+; X64-NEXT:    sbbq %rcx, %rax
+; X64-NEXT:    movzbl %r9b, %r8d
+; X64-NEXT:    cmovbl %r14d, %r8d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r9
+; X64-NEXT:    cmpq %rdi, %r9
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT:    movq %rcx, %r11
+; X64-NEXT:    movq (%rsp), %rax # 8-byte Reload
+; X64-NEXT:    sbbq %rax, %r11
+; X64-NEXT:    setb %r11b
+; X64-NEXT:    cmpq %r9, %rdi
+; X64-NEXT:    sbbq %rcx, %rax
+; X64-NEXT:    movzbl %r11b, %r9d
+; X64-NEXT:    cmovbl %r14d, %r9d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; X64-NEXT:    cmpq %rdi, %r11
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT:    movq %rcx, %r15
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT:    sbbq %rax, %r15
+; X64-NEXT:    setb %r15b
+; X64-NEXT:    cmpq %r11, %rdi
+; X64-NEXT:    sbbq %rcx, %rax
+; X64-NEXT:    movzbl %r15b, %r11d
+; X64-NEXT:    cmovbl %r14d, %r11d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT:    cmpq %rax, %rdi
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; X64-NEXT:    movq %rdx, %r15
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT:    sbbq %rcx, %r15
+; X64-NEXT:    setb %r15b
+; X64-NEXT:    cmpq %rdi, %rax
+; X64-NEXT:    sbbq %rdx, %rcx
+; X64-NEXT:    movzbl %r15b, %edi
+; X64-NEXT:    cmovbl %r14d, %edi
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r15
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT:    cmpq %rcx, %r15
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; X64-NEXT:    movq %rdx, %r12
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT:    sbbq %rax, %r12
+; X64-NEXT:    setb %r12b
+; X64-NEXT:    cmpq %r15, %rcx
+; X64-NEXT:    sbbq %rdx, %rax
+; X64-NEXT:    movzbl %r12b, %r15d
+; X64-NEXT:    cmovbl %r14d, %r15d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %r12
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT:    cmpq %rcx, %r12
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; X64-NEXT:    movq %rdx, %r13
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT:    sbbq %rax, %r13
+; X64-NEXT:    setb %r13b
+; X64-NEXT:    cmpq %r12, %rcx
+; X64-NEXT:    sbbq %rdx, %rax
+; X64-NEXT:    movzbl %r13b, %r12d
+; X64-NEXT:    cmovbl %r14d, %r12d
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rsi
+; X64-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; X64-NEXT:    cmpq %rsi, %rdx
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT:    movq %rcx, %r13
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT:    sbbq %rax, %r13
+; X64-NEXT:    setb %r13b
+; X64-NEXT:    cmpq %rdx, %rsi
+; X64-NEXT:    sbbq %rcx, %rax
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; X64-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 4-byte Folded Reload
+; X64-NEXT:    # xmm1 = mem[0],zero,zero,zero
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Folded Reload
+; X64-NEXT:    # xmm2 = mem[0],zero,zero,zero
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 4-byte Folded Reload
+; X64-NEXT:    # xmm3 = mem[0],zero,zero,zero
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
+; X64-NEXT:    # xmm4 = mem[0],zero,zero,zero
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 4-byte Folded Reload
+; X64-NEXT:    # xmm5 = mem[0],zero,zero,zero
+; X64-NEXT:    movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 4-byte Folded Reload
+; X64-NEXT:    # xmm6 = mem[0],zero,zero,zero
+; X64-NEXT:    movd %ebp, %xmm7
+; X64-NEXT:    movd %ebx, %xmm8
+; X64-NEXT:    movd %r10d, %xmm9
+; X64-NEXT:    movd %r8d, %xmm10
+; X64-NEXT:    movd %r9d, %xmm11
+; X64-NEXT:    movd %r11d, %xmm12
+; X64-NEXT:    movd %edi, %xmm13
+; X64-NEXT:    movd %r15d, %xmm14
+; X64-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X64-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; X64-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; X64-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; X64-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
+; X64-NEXT:    punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; X64-NEXT:    punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3]
+; X64-NEXT:    punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; X64-NEXT:    movd %r12d, %xmm0
+; X64-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1]
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm7[0]
+; X64-NEXT:    movzbl %r13b, %eax
+; X64-NEXT:    cmovbl %r14d, %eax
+; X64-NEXT:    andl $3, %eax
+; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; X64-NEXT:    movb %al, 4(%rdi)
+; X64-NEXT:    movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    andl $3, %eax
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
+; X64-NEXT:    andl $3, %ecx
+; X64-NEXT:    leaq (%rcx,%rax,4), %rax
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
+; X64-NEXT:    andl $3, %ecx
+; X64-NEXT:    shll $4, %ecx
+; X64-NEXT:    orq %rax, %rcx
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    andl $3, %eax
+; X64-NEXT:    shll $6, %eax
+; X64-NEXT:    orq %rcx, %rax
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
+; X64-NEXT:    andl $3, %ecx
+; X64-NEXT:    shll $8, %ecx
+; X64-NEXT:    orq %rax, %rcx
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    andl $3, %eax
+; X64-NEXT:    shll $10, %eax
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
+; X64-NEXT:    andl $3, %edx
+; X64-NEXT:    shll $12, %edx
+; X64-NEXT:    orq %rax, %rdx
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %esi
+; X64-NEXT:    andl $3, %esi
+; X64-NEXT:    shll $14, %esi
+; X64-NEXT:    orq %rdx, %rsi
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    andl $3, %eax
+; X64-NEXT:    shll $16, %eax
+; X64-NEXT:    orq %rsi, %rax
+; X64-NEXT:    orq %rcx, %rax
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
+; X64-NEXT:    andl $3, %ecx
+; X64-NEXT:    shll $18, %ecx
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
+; X64-NEXT:    andl $3, %edx
+; X64-NEXT:    shll $20, %edx
+; X64-NEXT:    orq %rcx, %rdx
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
+; X64-NEXT:    andl $3, %ecx
+; X64-NEXT:    shll $22, %ecx
+; X64-NEXT:    orq %rdx, %rcx
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
+; X64-NEXT:    andl $3, %edx
+; X64-NEXT:    shll $24, %edx
+; X64-NEXT:    orq %rcx, %rdx
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %ecx
+; X64-NEXT:    andl $3, %ecx
+; X64-NEXT:    shlq $26, %rcx
+; X64-NEXT:    orq %rdx, %rcx
+; X64-NEXT:    orq %rax, %rcx
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %eax
+; X64-NEXT:    andl $3, %eax
+; X64-NEXT:    shlq $28, %rax
+; X64-NEXT:    movzbl -{{[0-9]+}}(%rsp), %edx
+; X64-NEXT:    andl $3, %edx
+; X64-NEXT:    shlq $30, %rdx
+; X64-NEXT:    orq %rax, %rdx
+; X64-NEXT:    orq %rcx, %rdx
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movl %edx, (%rdi)
+; X64-NEXT:    addq $120, %rsp
+; X64-NEXT:    popq %rbx
+; X64-NEXT:    popq %r12
+; X64-NEXT:    popq %r13
+; X64-NEXT:    popq %r14
+; X64-NEXT:    popq %r15
+; X64-NEXT:    popq %rbp
+; X64-NEXT:    retq
+;
+; X86-LABEL: ucmp_uncommon_vectors:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    subl $44, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    andl $127, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    andl $127, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl $127, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    andl $127, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    cmpl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl %ebp, %edx
+; X86-NEXT:    sbbl %ebx, %edx
+; X86-NEXT:    movl %esi, %edx
+; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    movl $0, %edx
+; X86-NEXT:    sbbl %edx, %edx
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    sbbl %ebp, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl $0, %eax
+; X86-NEXT:    sbbl %eax, %eax
+; X86-NEXT:    movb $-1, %bl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    jb .LBB18_2
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload
+; X86-NEXT:  .LBB18_2:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %edx
+; X86-NEXT:    andl $127, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    cmpl %eax, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    sbbl %ebp, %ebx
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    sbbl %ecx, %ebx
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    sbbl %ebx, %ebx
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    sbbl %edi, %ecx
+; X86-NEXT:    movl $0, %edi
+; X86-NEXT:    sbbl %edi, %edi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jb .LBB18_4
+; X86-NEXT:  # %bb.3:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB18_4:
+; X86-NEXT:    andl $127, %eax
+; X86-NEXT:    andl $127, %ebx
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl %ebx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:    sbbl %ebp, %edi
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    sbbl %edx, %edi
+; X86-NEXT:    movl $0, %edi
+; X86-NEXT:    sbbl %edi, %edi
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    sbbl %ebx, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    movl $0, %esi
+; X86-NEXT:    sbbl %esi, %esi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jb .LBB18_6
+; X86-NEXT:  # %bb.5:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
+; X86-NEXT:    movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB18_6:
+; X86-NEXT:    andl $127, %ecx
+; X86-NEXT:    andl $127, %edi
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    cmpl %edi, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %edi, %esi
+; X86-NEXT:    sbbl %ebp, %esi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:    sbbl %eax, %esi
+; X86-NEXT:    movl $0, %esi
+; X86-NEXT:    sbbl %esi, %esi
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %edi, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    sbbl %ebx, %eax
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    sbbl %ebx, %ebx
+; X86-NEXT:    movb $-1, %bl
+; X86-NEXT:    jb .LBB18_8
+; X86-NEXT:  # %bb.7:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload
+; X86-NEXT:  .LBB18_8:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %edx
+; X86-NEXT:    andl $127, %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    sbbl %ebp, %ebx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    sbbl %ecx, %ebx
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    sbbl %ebx, %ebx
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %eax, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    sbbl %edi, %ecx
+; X86-NEXT:    movl $0, %edi
+; X86-NEXT:    sbbl %edi, %edi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jb .LBB18_10
+; X86-NEXT:  # %bb.9:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB18_10:
+; X86-NEXT:    andl $127, %eax
+; X86-NEXT:    andl $127, %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl %esi, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    sbbl %ebp, %edi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:    sbbl %edx, %edi
+; X86-NEXT:    movl $0, %edi
+; X86-NEXT:    sbbl %edi, %edi
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    sbbl %esi, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    sbbl %ebx, %edx
+; X86-NEXT:    movl $0, %esi
+; X86-NEXT:    sbbl %esi, %esi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jb .LBB18_12
+; X86-NEXT:  # %bb.11:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
+; X86-NEXT:    movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB18_12:
+; X86-NEXT:    andl $127, %ecx
+; X86-NEXT:    andl $127, %edi
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    cmpl %edi, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %edi, %esi
+; X86-NEXT:    sbbl %ebp, %esi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    movl %ebx, %esi
+; X86-NEXT:    sbbl %eax, %esi
+; X86-NEXT:    movl $0, %esi
+; X86-NEXT:    sbbl %esi, %esi
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    sbbl %edi, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %ebx, %eax
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    sbbl %ebx, %ebx
+; X86-NEXT:    movb $-1, %bl
+; X86-NEXT:    jb .LBB18_14
+; X86-NEXT:  # %bb.13:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload
+; X86-NEXT:  .LBB18_14:
+; X86-NEXT:    movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %edx
+; X86-NEXT:    andl $127, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    cmpl %ebp, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    sbbl %esi, %ebx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    sbbl %ecx, %ebx
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    sbbl %ebx, %ebx
+; X86-NEXT:    setb %bl
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %edi, %ecx
+; X86-NEXT:    movl $0, %edi
+; X86-NEXT:    sbbl %edi, %edi
+; X86-NEXT:    movb $-1, %bh
+; X86-NEXT:    jb .LBB18_16
+; X86-NEXT:  # %bb.15:
+; X86-NEXT:    movb %bl, %bh
+; X86-NEXT:  .LBB18_16:
+; X86-NEXT:    movb %bh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %eax
+; X86-NEXT:    andl $127, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmpl %ebx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    sbbl %esi, %ebx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    sbbl %edx, %ebx
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    sbbl %ebx, %ebx
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    movl $0, %edx
+; X86-NEXT:    sbbl %edx, %edx
+; X86-NEXT:    movb $-1, %dl
+; X86-NEXT:    jb .LBB18_18
+; X86-NEXT:  # %bb.17:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
+; X86-NEXT:  .LBB18_18:
+; X86-NEXT:    movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %ecx
+; X86-NEXT:    andl $127, %ebx
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %ebx, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    movl %ebp, %edx
+; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    movl $0, %edx
+; X86-NEXT:    sbbl %edx, %edx
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    sbbl %ebx, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %ebp, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl $0, %ebp
+; X86-NEXT:    sbbl %ebp, %ebp
+; X86-NEXT:    movb $-1, %dh
+; X86-NEXT:    jb .LBB18_20
+; X86-NEXT:  # %bb.19:
+; X86-NEXT:    movb {{[-0-9]+}}(%e{{[sb]}}p), %dh # 1-byte Reload
+; X86-NEXT:  .LBB18_20:
+; X86-NEXT:    movb %dh, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %edi
+; X86-NEXT:    andl $127, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    cmpl %ebp, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    movl $0, %edx
+; X86-NEXT:    sbbl %edx, %edx
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %eax, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %ebx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl $0, %ecx
+; X86-NEXT:    sbbl %ecx, %ecx
+; X86-NEXT:    movb $-1, %cl
+; X86-NEXT:    jb .LBB18_22
+; X86-NEXT:  # %bb.21:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:  .LBB18_22:
+; X86-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %eax
+; X86-NEXT:    andl $127, %edx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    cmpl %ebp, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl %esi, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    sbbl %edi, %ecx
+; X86-NEXT:    movl $0, %ecx
+; X86-NEXT:    sbbl %ecx, %ecx
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %ebx, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl %edx, %edi
+; X86-NEXT:    movl $0, %esi
+; X86-NEXT:    sbbl %esi, %esi
+; X86-NEXT:    movb $-1, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    jb .LBB18_24
+; X86-NEXT:  # %bb.23:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
+; X86-NEXT:    movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:  .LBB18_24:
+; X86-NEXT:    andl $127, %ebp
+; X86-NEXT:    andl $127, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %esi, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %ecx, %esi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    movl %edx, %esi
+; X86-NEXT:    sbbl %eax, %esi
+; X86-NEXT:    movl $0, %esi
+; X86-NEXT:    sbbl %esi, %esi
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %edx, %eax
+; X86-NEXT:    movl $0, %eax
+; X86-NEXT:    sbbl %eax, %eax
+; X86-NEXT:    movb $-1, %al
+; X86-NEXT:    jb .LBB18_26
+; X86-NEXT:  # %bb.25:
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X86-NEXT:  .LBB18_26:
+; X86-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %edi
+; X86-NEXT:    andl $127, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    cmpl %ecx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:    sbbl %ebp, %eax
+; X86-NEXT:    movl $0, %eax
+; X86-NEXT:    sbbl %eax, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    cmpl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %ebx, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl $0, %ebp
+; X86-NEXT:    sbbl %ebp, %ebp
+; X86-NEXT:    movb $-1, %ah
+; X86-NEXT:    jb .LBB18_28
+; X86-NEXT:  # %bb.27:
+; X86-NEXT:    movb %al, %ah
+; X86-NEXT:  .LBB18_28:
+; X86-NEXT:    movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %edx
+; X86-NEXT:    andl $127, %ecx
+; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    cmpl %ebp, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    sbbl %edi, %eax
+; X86-NEXT:    movl $0, %eax
+; X86-NEXT:    sbbl %eax, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    cmpl %ebx, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %ecx, %edi
+; X86-NEXT:    movl $0, %edi
+; X86-NEXT:    sbbl %edi, %edi
+; X86-NEXT:    movb $-1, %cl
+; X86-NEXT:    jb .LBB18_30
+; X86-NEXT:  # %bb.29:
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:  .LBB18_30:
+; X86-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    andl $127, %ebp
+; X86-NEXT:    andl $127, %ebx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    cmpl %ecx, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    sbbl %esi, %eax
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    sbbl %edx, %eax
+; X86-NEXT:    movl $0, %eax
+; X86-NEXT:    sbbl %eax, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    cmpl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl $0, %ecx
+; X86-NEXT:    sbbl %ecx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movb $-1, %cl
+; X86-NEXT:    jb .LBB18_32
+; X86-NEXT:  # %bb.31:
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:  .LBB18_32:
+; X86-NEXT:    movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT:    movl %edi, %ecx
+; X86-NEXT:    cmpl %esi, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %edi, %eax
+; X86-NEXT:    sbbl %edx, %eax
+; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:    sbbl %ebp, %eax
+; X86-NEXT:    movl $0, %eax
+; X86-NEXT:    sbbl %eax, %eax
+; X86-NEXT:    setb %al
+; X86-NEXT:    cmpl %ecx, %esi
+; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    sbbl %ebx, %ebp
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    sbbl %edx, %edx
+; X86-NEXT:    movb $-1, %dl
+; X86-NEXT:    jb .LBB18_34
+; X86-NEXT:  # %bb.33:
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:  .LBB18_34:
+; X86-NEXT:    movzbl %dl, %eax
+; X86-NEXT:    andl $3, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movb %al, 4(%edx)
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 1-byte Folded Reload
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
+; X86-NEXT:    andl $3, %ecx
+; X86-NEXT:    andl $3, %edi
+; X86-NEXT:    leal (%edi,%ecx,4), %eax
+; X86-NEXT:    andl $3, %esi
+; X86-NEXT:    shll $4, %esi
+; X86-NEXT:    orl %eax, %esi
+; X86-NEXT:    andl $3, %ebx
+; X86-NEXT:    shll $6, %ebx
+; X86-NEXT:    orl %esi, %ebx
+; X86-NEXT:    andl $3, %ebp
+; X86-NEXT:    shll $8, %ebp
+; X86-NEXT:    orl %ebx, %ebp
+; X86-NEXT:    andl $3, %edx
+; X86-NEXT:    shll $10, %edx
+; X86-NEXT:    orl %ebp, %edx
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X86-NEXT:    andl $3, %eax
+; X86-NEXT:    shll $12, %eax
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    andl $3, %ecx
+; X86-NEXT:    shll $14, %ecx
+; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X86-NEXT:    andl $3, %eax
+; X86-NEXT:    shll $16, %eax
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
+; X86-NEXT:    andl $3, %edi
+; X86-NEXT:    shll $18, %edi
+; X86-NEXT:    orl %eax, %edi
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
+; X86-NEXT:    andl $3, %ecx
+; X86-NEXT:    shll $20, %ecx
+; X86-NEXT:    orl %edi, %ecx
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload
+; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    andl $3, %esi
+; X86-NEXT:    shll $22, %esi
+; X86-NEXT:    andl $3, %edi
+; X86-NEXT:    shll $24, %edi
+; X86-NEXT:    orl %esi, %edi
+; X86-NEXT:    andl $3, %ebx
+; X86-NEXT:    shll $26, %ebx
+; X86-NEXT:    orl %edi, %ebx
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X86-NEXT:    andl $3, %eax
+; X86-NEXT:    shll $28, %eax
+; X86-NEXT:    orl %ebx, %eax
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload
+; X86-NEXT:    shll $30, %edx
+; X86-NEXT:    orl %eax, %edx
+; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl %edx, (%eax)
+; X86-NEXT:    addl $44, %esp
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+  %1 = call <17 x i2> @llvm.ucmp(<17 x i71> %x, <17 x i71> %y)
+  ret <17 x i2> %1
+}