[llvm] 7af3d39 - [SystemZ] Optimize vector comparison reductions

Ulrich Weigand via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 15 10:29:19 PDT 2025


Author: Ulrich Weigand
Date: 2025-03-15T18:28:44+01:00
New Revision: 7af3d3929e8523cad144ea94551328ce103f9d68

URL: https://github.com/llvm/llvm-project/commit/7af3d3929e8523cad144ea94551328ce103f9d68
DIFF: https://github.com/llvm/llvm-project/commit/7af3d3929e8523cad144ea94551328ce103f9d68.diff

LOG: [SystemZ] Optimize vector comparison reductions

Generate efficient code using the condition code set by the
VECTOR (FP) COMPARE family of instructions to implement
vector comparison reductions, e.g. as resulting from
__builtin_reduce_and/or of some vector comparison.

Fixes: https://github.com/llvm/llvm-project/issues/129434

Added: 
    llvm/test/CodeGen/SystemZ/vec-reduce-cmp.ll

Modified: 
    llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
    llvm/lib/Target/SystemZ/SystemZISelLowering.h

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 1e0c303a2e4da..f87307030651a 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -774,6 +774,7 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                        ISD::UINT_TO_FP,
                        ISD::STRICT_FP_EXTEND,
                        ISD::BSWAP,
+                       ISD::SETCC,
                        ISD::SDIV,
                        ISD::UDIV,
                        ISD::SREM,
@@ -3260,6 +3261,43 @@ static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL,
     return;
   if (C.Op0.getValueType() != MVT::i128)
     return;
+
+  // Recognize vector comparison reductions.
+  if ((C.CCMask == SystemZ::CCMASK_CMP_EQ ||
+       C.CCMask == SystemZ::CCMASK_CMP_NE) &&
+      (isNullConstant(C.Op1) || isAllOnesConstant(C.Op1))) {
+    bool CmpEq = C.CCMask == SystemZ::CCMASK_CMP_EQ;
+    bool CmpNull = isNullConstant(C.Op1);
+    SDValue Src = peekThroughBitcasts(C.Op0);
+    if (Src.hasOneUse() && isBitwiseNot(Src)) {
+      Src = Src.getOperand(0);
+      CmpNull = !CmpNull;
+    }
+    unsigned Opcode = 0;
+    if (Src.hasOneUse()) {
+      switch (Src.getOpcode()) {
+      case SystemZISD::VICMPE: Opcode = SystemZISD::VICMPES; break;
+      case SystemZISD::VICMPH: Opcode = SystemZISD::VICMPHS; break;
+      case SystemZISD::VICMPHL: Opcode = SystemZISD::VICMPHLS; break;
+      case SystemZISD::VFCMPE: Opcode = SystemZISD::VFCMPES; break;
+      case SystemZISD::VFCMPH: Opcode = SystemZISD::VFCMPHS; break;
+      case SystemZISD::VFCMPHE: Opcode = SystemZISD::VFCMPHES; break;
+      default: break;
+      }
+    }
+    if (Opcode) {
+      C.Opcode = Opcode;
+      C.Op0 = Src->getOperand(0);
+      C.Op1 = Src->getOperand(1);
+      C.CCValid = SystemZ::CCMASK_VCMP;
+      C.CCMask = CmpNull ? SystemZ::CCMASK_VCMP_NONE : SystemZ::CCMASK_VCMP_ALL;
+      if (!CmpEq)
+        C.CCMask ^= C.CCValid;
+      return;
+    }
+  }
+
+  // Everything below here is not useful if we have native i128 compares.
   if (DAG.getSubtarget<SystemZSubtarget>().hasVectorEnhancements3())
     return;
 
@@ -3443,8 +3481,14 @@ static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
     return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
                        DAG.getTargetConstant(RegisterOnly, DL, MVT::i32));
   }
-  if (C.Opcode == SystemZISD::VICMPES) {
-    SDVTList VTs = DAG.getVTList(C.Op0.getValueType(), MVT::i32);
+  if (C.Opcode == SystemZISD::VICMPES ||
+      C.Opcode == SystemZISD::VICMPHS ||
+      C.Opcode == SystemZISD::VICMPHLS ||
+      C.Opcode == SystemZISD::VFCMPES ||
+      C.Opcode == SystemZISD::VFCMPHS ||
+      C.Opcode == SystemZISD::VFCMPHES) {
+    EVT IntVT = C.Op0.getValueType().changeVectorElementTypeToInteger();
+    SDVTList VTs = DAG.getVTList(IntVT, MVT::i32);
     SDValue Val = DAG.getNode(C.Opcode, DL, VTs, C.Op0, C.Op1);
     return SDValue(Val.getNode(), 1);
   }
@@ -8036,6 +8080,42 @@ SDValue SystemZTargetLowering::combineBSWAP(
   return SDValue();
 }
 
+SDValue SystemZTargetLowering::combineSETCC(
+    SDNode *N, DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
+  const SDValue LHS = N->getOperand(0);
+  const SDValue RHS = N->getOperand(1);
+  bool CmpNull = isNullConstant(RHS);
+  bool CmpAllOnes = isAllOnesConstant(RHS);
+  EVT VT = N->getValueType(0);
+  SDLoc DL(N);
+
+  // Match icmp_eq/ne(bitcast(icmp(X,Y)),0/-1) reduction patterns, and
+  // change the outer compare to a i128 compare.  This will normally
+  // allow the reduction to be recognized in adjustICmp128, and even if
+  // not, the i128 compare will still generate better code.
+  if ((CC == ISD::SETNE || CC == ISD::SETEQ) && (CmpNull || CmpAllOnes)) {
+    SDValue Src = peekThroughBitcasts(LHS);
+    if (Src.getOpcode() == ISD::SETCC &&
+        Src.getValueType().isFixedLengthVector() &&
+        Src.getValueType().getScalarType() == MVT::i1) {
+      EVT CmpVT = Src.getOperand(0).getValueType();
+      if (CmpVT.getSizeInBits() == 128) {
+        EVT IntVT = CmpVT.changeVectorElementTypeToInteger();
+        SDValue LHS =
+            DAG.getBitcast(MVT::i128, DAG.getSExtOrTrunc(Src, DL, IntVT));
+        SDValue RHS = CmpNull ? DAG.getConstant(0, DL, MVT::i128)
+                              : DAG.getAllOnesConstant(DL, MVT::i128);
+        return DAG.getNode(ISD::SETCC, DL, VT, LHS, RHS, N->getOperand(2),
+                           N->getFlags());
+      }
+    }
+  }
+
+  return SDValue();
+}
+
 static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
   // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
   // set by the CCReg instruction using the CCValid / CCMask masks,
@@ -8286,6 +8366,7 @@ SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:         return combineINT_TO_FP(N, DCI);
   case ISD::BSWAP:              return combineBSWAP(N, DCI);
+  case ISD::SETCC:              return combineSETCC(N, DCI);
   case SystemZISD::BR_CCMASK:   return combineBR_CCMASK(N, DCI);
   case SystemZISD::SELECT_CCMASK: return combineSELECT_CCMASK(N, DCI);
   case SystemZISD::GET_CCMASK:  return combineGET_CCMASK(N, DCI);

diff  --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 839a550012444..32cd9d5aa6733 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -755,6 +755,7 @@ class SystemZTargetLowering : public TargetLowering {
   SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
+  SDValue combineSETCC(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
   SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;

diff  --git a/llvm/test/CodeGen/SystemZ/vec-reduce-cmp.ll b/llvm/test/CodeGen/SystemZ/vec-reduce-cmp.ll
new file mode 100644
index 0000000000000..f8ec428969e0b
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-reduce-cmp.ll
@@ -0,0 +1,3812 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Test reduction of vector comparisons
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z16 | FileCheck %s
+
+define zeroext i1 @vec_all_eq_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_eq_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_eq_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_eq_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_eq_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_eq_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_eq_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_eq_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_eq_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_eq_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_eq_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_eq_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_eq_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_eq_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_eq_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_eq_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_eq_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_eq_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_eq_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_eq_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_eq_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_eq_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_eq_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_eq_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_eq_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_eq_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_eq_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_eq_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_eq_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_eq_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_eq_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_eq_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_eq_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_eq_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_eq_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_eq_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_eq_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_eq_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_eq_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ne <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_eq_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp eq <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_eq_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_eq_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp une <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_eq_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp oeq <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_eq_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp une <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_eq_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp oeq <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_eq_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_eq_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp une <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_eq_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_eq_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp oeq <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_eq_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_eq_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp une <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_eq_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_eq_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp oeq <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ne_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_ne_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_ne_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ne_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_ne_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_ne_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ne_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_ne_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_ne_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ne_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_ne_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_ne_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ne_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_ne_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_ne_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ne_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_ne_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_ne_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ne_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_ne_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_ne_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ne_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_ne_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_ne_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ne_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_ne_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_ne_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+; Equality-based vector comparison reductions. Each test compares two whole
+; vectors, bitcasts the <N x i1> result to an iN mask, and tests that mask
+; against 0 ("all equal" / "any unequal" and their negations). The expected
+; code reuses the condition code set by the VECTOR COMPARE (vceq*/vfce*)
+; instruction via lghi + locghi* -- no element-wise mask is materialized.
+define zeroext i1 @not_vec_all_ne_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_ne_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_ne_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+; <4 x i32> unsigned-element variants; equality uses the same vceqfs.
+define zeroext i1 @vec_all_ne_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_ne_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_ne_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ne_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_ne_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_ne_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+; <2 x i64> variants (vceqgs, i2 mask).
+define zeroext i1 @vec_all_ne_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_ne_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_ne_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ne_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_ne_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_ne_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+; <2 x i64> unsigned-element variants; same vceqgs pattern.
+define zeroext i1 @vec_all_ne_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_ne_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_ne_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ne_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_ne_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp eq <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_ne_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vceqgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ne <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+; <4 x float> variants (vfcesbs); note oeq for "all", une for "any".
+define zeroext i1 @vec_all_ne_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_ne_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp oeq <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_ne_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp une <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ne_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_ne_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp oeq <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_ne_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp une <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+; <2 x double> variants (vfcedbs).
+define zeroext i1 @vec_all_ne_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_ne_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp oeq <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ne_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_ne_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp une <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ne_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_ne_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp oeq <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ne_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_ne_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfcedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp une <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+; Less-than reductions. "all a < b" is written as "no element has a >= b"
+; (icmp sge/uge + mask == 0); "any a < b" tests the slt/ult/olt mask != 0.
+; Expected code swaps the operands into VECTOR COMPARE HIGH (vch*/vchl*,
+; note %v26, %v24 order) and branches on its condition code.
+define zeroext i1 @vec_all_lt_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_lt_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sge <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_lt_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp slt <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_lt_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sge <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_lt_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp slt <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+; <16 x i8> unsigned variants (vchlbs).
+define zeroext i1 @vec_all_lt_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_lt_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp uge <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_lt_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ult <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_lt_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp uge <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_lt_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ult <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+; <8 x i16> signed variants (vchhs).
+define zeroext i1 @vec_all_lt_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_lt_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sge <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_lt_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp slt <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_lt_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sge <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_lt_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp slt <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+; <8 x i16> unsigned variants (vchlhs).
+define zeroext i1 @vec_all_lt_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_lt_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp uge <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_lt_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ult <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_lt_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp uge <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_lt_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ult <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+; <4 x i32> signed variants (vchfs).
+define zeroext i1 @vec_all_lt_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_lt_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sge <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_lt_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp slt <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_lt_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sge <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_lt_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp slt <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+; <4 x i32> unsigned variants (vchlfs).
+define zeroext i1 @vec_all_lt_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_lt_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp uge <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_lt_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ult <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_lt_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp uge <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_lt_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ult <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+; <2 x i64> signed variants (vchgs).
+define zeroext i1 @vec_all_lt_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_lt_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sge <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_lt_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp slt <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_lt_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sge <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_lt_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp slt <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+; <2 x i64> unsigned variants (vchlgs).
+define zeroext i1 @vec_all_lt_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_lt_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp uge <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_lt_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ult <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_lt_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp uge <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_lt_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ult <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+; <4 x float> variants (vfchsbs); uge for "all" (negated), olt for "any".
+define zeroext i1 @vec_all_lt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_lt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp uge <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_lt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp olt <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_lt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp uge <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_lt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp olt <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+; <2 x double> variants (vfchdbs).
+define zeroext i1 @vec_all_lt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_lt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp uge <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_lt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_lt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp olt <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_lt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_lt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp uge <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_lt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_lt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp olt <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+; Less-or-equal reductions. "all a <= b" is written as "no element has a > b"
+; (icmp sgt/ugt + mask == 0); "any a <= b" tests the sle/ule mask != 0.
+; Expected code keeps the (%v24, %v26) operand order on VECTOR COMPARE HIGH
+; and inverts the condition code in the locghi* select instead.
+define zeroext i1 @vec_all_le_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_le_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sgt <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_le_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sle <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_le_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sgt <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_le_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sle <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+; <16 x i8> unsigned variants (vchlbs).
+define zeroext i1 @vec_all_le_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_le_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ugt <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_le_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ule <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_le_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ugt <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_le_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ule <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+; <8 x i16> signed variants (vchhs).
+define zeroext i1 @vec_all_le_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_le_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sgt <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_le_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sle <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_le_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sgt <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_le_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sle <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+; <8 x i16> unsigned variants (vchlhs).
+define zeroext i1 @vec_all_le_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_le_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ugt <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_le_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ule <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_le_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ugt <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_le_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ule <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+; <4 x i32> signed variants (vchfs).
+define zeroext i1 @vec_all_le_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_le_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sgt <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_le_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sle <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_le_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sgt <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_le_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sle <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+; <4 x i32> unsigned variants (vchlfs).
+define zeroext i1 @vec_all_le_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_le_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ugt <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_le_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ule <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_le_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ugt <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_le_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ule <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+; <2 x i64> signed variants (vchgs).
+define zeroext i1 @vec_all_le_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_le_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sgt <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_le_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sle <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_le_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sgt <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_le_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sle <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+; <2 x i64> unsigned variants (vchlgs).
+define zeroext i1 @vec_all_le_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_le_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ugt <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_le_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ule <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_le_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ugt <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_le_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ule <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_le_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_le_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ugt <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_le_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp ole <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_le_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ugt <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_le_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp ole <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_le_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_le_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ugt <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_le_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_le_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp ole <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_le_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_le_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ugt <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_le_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_le_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp ole <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_gt_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sle <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_gt_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sgt <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_gt_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sle <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_gt_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sgt <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_gt_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ule <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_gt_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ugt <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_gt_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ule <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_gt_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ugt <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_gt_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sle <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_gt_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sgt <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_gt_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sle <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_gt_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sgt <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_gt_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ule <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_gt_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ugt <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_gt_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ule <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_gt_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ugt <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_gt_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sle <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_gt_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sgt <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_gt_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sle <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_gt_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sgt <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_gt_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ule <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_gt_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ugt <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_gt_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ule <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_gt_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ugt <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_gt_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sle <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_gt_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sgt <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_gt_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp sle <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_gt_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sgt <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_gt_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ule <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_gt_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ugt <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_gt_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ule <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_gt_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp ugt <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_gt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ule <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_gt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp ogt <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_gt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ule <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_gt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp ogt <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_gt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_gt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ule <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_gt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_gt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp ogt <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_gt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_gt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ule <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_gt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_gt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp ogt <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_ge_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp slt <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_ge_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sge <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_ge_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp slt <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vsc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_ge_vsc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sge <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_all_ge_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ult <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp eq i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: vec_any_ge_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp uge <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %1 = icmp ne i16 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_all_ge_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ult <16 x i8> %a, %b
+  %1 = bitcast <16 x i1> %0 to i16
+  %2 = icmp ne i16 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vuc(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: not_vec_any_ge_vuc:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp uge <16 x i8> %a, %b
+  %0 = bitcast <16 x i1> %cmp to i16
+  %.not = icmp eq i16 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_ge_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp slt <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_ge_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sge <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_ge_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp slt <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vss(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_ge_vss:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sge <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_all_ge_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ult <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp eq i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: vec_any_ge_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp uge <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %1 = icmp ne i8 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_all_ge_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ult <8 x i16> %a, %b
+  %1 = bitcast <8 x i1> %0 to i8
+  %2 = icmp ne i8 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vus(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: not_vec_any_ge_vus:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlhs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp uge <8 x i16> %a, %b
+  %0 = bitcast <8 x i1> %cmp to i8
+  %.not = icmp eq i8 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_ge_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp slt <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_ge_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sge <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_ge_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp slt <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vsi(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_ge_vsi:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sge <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_all_ge_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ult <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: vec_any_ge_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp uge <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_all_ge_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ult <4 x i32> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vui(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_vec_any_ge_vui:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlfs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp uge <4 x i32> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_ge_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp slt <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_ge_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sge <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_ge_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp slt <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vsl(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_ge_vsl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp sge <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_all_ge_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ult <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: vec_any_ge_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp uge <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_all_ge_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = icmp ult <2 x i64> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vul(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: not_vec_any_ge_vul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vchlgs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = icmp uge <2 x i64> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_ge_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ult <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_ge_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp oge <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %1 = icmp ne i4 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_ge_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ult <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_ge_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp oge <4 x float> %a, %b
+  %0 = bitcast <4 x i1> %cmp to i4
+  %.not = icmp eq i4 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ge_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_ge_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ult <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ge_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_ge_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp oge <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %1 = icmp ne i2 %0, 0
+  ret i1 %1
+}
+
+define zeroext i1 @not_vec_all_ge_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_ge_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ult <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ge_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_ge_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %cmp = fcmp oge <2 x double> %a, %b
+  %0 = bitcast <2 x i1> %cmp to i2
+  %.not = icmp eq i2 %0, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_nlt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_nlt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp olt <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_nlt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_nlt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp uge <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_all_nlt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_nlt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp olt <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_nlt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_nlt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp uge <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %.not = icmp eq i4 %1, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_nlt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_nlt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp olt <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_nlt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_nlt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp uge <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_all_nlt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_nlt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp olt <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_nlt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_nlt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp uge <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %.not = icmp eq i2 %1, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_nle_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_nle_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ole <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_nle_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_nle_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ugt <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_all_nle_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_nle_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ole <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_nle_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_nle_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ugt <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %.not = icmp eq i4 %1, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_nle_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_nle_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ole <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_nle_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_nle_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ugt <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_all_nle_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_nle_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ole <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_nle_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_nle_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v26, %v24
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ugt <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %.not = icmp eq i2 %1, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ngt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_ngt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ogt <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ngt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_ngt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ule <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_all_ngt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_ngt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ogt <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ngt_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_ngt_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchsbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ule <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %.not = icmp eq i4 %1, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_ngt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_ngt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ogt <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_ngt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_ngt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ule <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_all_ngt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_ngt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ogt <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_ngt_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_ngt_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchdbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ule <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %.not = icmp eq i2 %1, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_nge_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_all_nge_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp oge <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp eq i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_nge_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: vec_any_nge_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ult <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_all_nge_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_all_nge_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp oge <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %2 = icmp ne i4 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_nge_vf(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: not_vec_any_nge_vf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchesbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ult <4 x float> %a, %b
+  %1 = bitcast <4 x i1> %0 to i4
+  %.not = icmp eq i4 %1, 0
+  ret i1 %.not
+}
+
+define zeroext i1 @vec_all_nge_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_all_nge_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghio %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp oge <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp eq i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @vec_any_nge_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: vec_any_nge_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghinhe %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ult <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_all_nge_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_all_nge_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghile %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp oge <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %2 = icmp ne i2 %1, 0
+  ret i1 %2
+}
+
+define zeroext i1 @not_vec_any_nge_vd(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: not_vec_any_nge_vd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vfchedbs %v0, %v24, %v26
+; CHECK-NEXT:    lghi %r2, 0
+; CHECK-NEXT:    locghie %r2, 1
+; CHECK-NEXT:    br %r14
+entry:
+  %0 = fcmp ult <2 x double> %a, %b
+  %1 = bitcast <2 x i1> %0 to i2
+  %.not = icmp eq i2 %1, 0
+  ret i1 %.not
+}


        


More information about the llvm-commits mailing list