[llvm] ae1bb44 - [VE] v256.32|64 setcc isel and tests

Simon Moll via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 8 04:22:02 PST 2022


Author: Simon Moll
Date: 2022-02-08T13:20:55+01:00
New Revision: ae1bb44ed80b7b60c3fd2426c8bee3df93e4a314

URL: https://github.com/llvm/llvm-project/commit/ae1bb44ed80b7b60c3fd2426c8bee3df93e4a314
DIFF: https://github.com/llvm/llvm-project/commit/ae1bb44ed80b7b60c3fd2426c8bee3df93e4a314.diff

LOG: [VE] v256.32|64 setcc isel and tests

Reviewed By: kaz7

Differential Revision: https://reviews.llvm.org/D119223

Added: 
    llvm/test/CodeGen/VE/Vector/vec_fcmp.ll
    llvm/test/CodeGen/VE/Vector/vec_icmp.ll

Modified: 
    llvm/lib/Target/VE/VEISelLowering.cpp
    llvm/lib/Target/VE/VVPInstrInfo.td
    llvm/lib/Target/VE/VVPInstrPatternsVec.td
    llvm/lib/Target/VE/VVPNodes.def

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index a54861abb6d88..51f710b0f13f9 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -2728,6 +2728,12 @@ SDValue VETargetLowering::lowerToVVP(SDValue Op, SelectionDAG &DAG) const {
     auto OnFalse = Op->getOperand(2);
     return CDAG.getNode(VVPOpcode, LegalVecVT, {OnTrue, OnFalse, Mask, AVL});
   }
+  if (VVPOpcode == VEISD::VVP_SETCC) {
+    auto LHS = Op->getOperand(0);
+    auto RHS = Op->getOperand(1);
+    auto Pred = Op->getOperand(2);
+    return CDAG.getNode(VVPOpcode, LegalVecVT, {LHS, RHS, Pred, Mask, AVL});
+  }
   llvm_unreachable("lowerToVVP called for unexpected SDNode.");
 }
 

diff --git a/llvm/lib/Target/VE/VVPInstrInfo.td b/llvm/lib/Target/VE/VVPInstrInfo.td
index ef9c238066c07..a607788b884df 100644
--- a/llvm/lib/Target/VE/VVPInstrInfo.td
+++ b/llvm/lib/Target/VE/VVPInstrInfo.td
@@ -48,6 +48,19 @@ def SDTSelectVVP : SDTypeProfile<1, 4, [       // vp_select, vp_merge
   IsVLVT<4>
 ]>;
 
+// SetCC (lhs, rhs, cc, mask, vl)
+def SDTSetCCVVP : SDTypeProfile<1, 5, [        // vp_setcc
+  SDTCisVec<0>,
+  SDTCisVec<1>,
+  SDTCisSameNumEltsAs<0, 1>,
+  SDTCisSameAs<1, 2>,
+  SDTCisVT<3, OtherVT>,
+  SDTCisInt<4>,
+  SDTCisSameNumEltsAs<0, 4>,
+  IsVLVT<5>
+]>;
+
+
 // Binary operator commutative pattern.
 class vvp_commutative<SDNode RootOp> :
   PatFrags<
@@ -90,3 +103,6 @@ def vvp_fdiv    : SDNode<"VEISD::VVP_FDIV",  SDTFPBinOpVVP>;
 // } Binary Operators
 
 def vvp_select : SDNode<"VEISD::VVP_SELECT", SDTSelectVVP>;
+
+// setcc (lhs, rhs, cc, mask, vl)
+def vvp_setcc  : SDNode<"VEISD::VVP_SETCC", SDTSetCCVVP>;

diff --git a/llvm/lib/Target/VE/VVPInstrPatternsVec.td b/llvm/lib/Target/VE/VVPInstrPatternsVec.td
index cd39e56bb3715..22de6ddf9d6be 100644
--- a/llvm/lib/Target/VE/VVPInstrPatternsVec.td
+++ b/llvm/lib/Target/VE/VVPInstrPatternsVec.td
@@ -293,3 +293,30 @@ defm : Merge_mvv_ShortLong<vvp_select,
 defm : Merge_mvv_ShortLong<vvp_select,
                            v256i64,
                            v256i32, "VMRG">;
+
+multiclass Set_CC<ValueType DataVT, string FmkBaseName, string CmpBaseName, SDPatternOperator CCMatcher, SDNodeXForm CCConv> {
+  // Unmasked.
+  def : Pat<(v256i1 (vvp_setcc
+              DataVT:$LHS, DataVT:$RHS, CCMatcher:$cond, (v256i1 true_mask), i32:$vl)),
+              (!cast<Instruction>(FmkBaseName#"vl")
+                (CCConv $cond),
+                (!cast<Instruction>(CmpBaseName#"vvl")
+                  $LHS, $RHS, $vl),
+                $vl)>;
+  // Masked.
+  def : Pat<(v256i1 (vvp_setcc
+              DataVT:$LHS, DataVT:$RHS, CCMatcher:$cond, v256i1:$vm, i32:$vl)),
+              (!cast<Instruction>(FmkBaseName#"vml")
+                (CCConv $cond),
+                (!cast<Instruction>(CmpBaseName#"vvl")
+                  $LHS, $RHS, $vl),
+                $vm, $vl)>;
+}
+
+defm : Set_CC<v256i64,"VFMKL","VCMPUL",CCUIOp,icond2cc>;
+defm : Set_CC<v256i64,"VFMKL","VCMPSL",CCSIOp,icond2cc>;
+defm : Set_CC<v256f64,"VFMKL","VFCMPD",cond,fcond2cc>;
+
+defm : Set_CC<v256i32,"VFMKW","VCMPUW",CCUIOp,icond2cc>;
+defm : Set_CC<v256i32,"VFMKW","VCMPSWZX",CCSIOp,icond2cc>;
+defm : Set_CC<v256f32,"VFMKS","VFCMPS",cond,fcond2cc>;

diff --git a/llvm/lib/Target/VE/VVPNodes.def b/llvm/lib/Target/VE/VVPNodes.def
index 8000f84c5dbe5..edb0cbe69efec 100644
--- a/llvm/lib/Target/VE/VVPNodes.def
+++ b/llvm/lib/Target/VE/VVPNodes.def
@@ -59,6 +59,8 @@ ADD_BINARY_VVP_OP_COMPACT(FSUB)
 ADD_BINARY_VVP_OP_COMPACT(FMUL)
 ADD_BINARY_VVP_OP_COMPACT(FDIV)
 
+ADD_VVP_OP(VVP_SETCC, SETCC)
+
 // Shuffles.
 ADD_VVP_OP(VVP_SELECT,VSELECT)
 HANDLE_VP_TO_VVP(VP_SELECT, VVP_SELECT)

diff --git a/llvm/test/CodeGen/VE/Vector/vec_fcmp.ll b/llvm/test/CodeGen/VE/Vector/vec_fcmp.ll
new file mode 100644
index 0000000000000..5c6f3550388c5
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Vector/vec_fcmp.ll
@@ -0,0 +1,681 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+
+; <256 x float>
+
+;; TODO v256i1 zero-mask isel
+;; ; Function Attrs: nounwind
+;; define fastcc <256 x i1> @fcmp_false_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+;;   %z = fcmp false <256 x float> %x, %y
+;;   ret <256 x i1> %z
+;; }
+;;
+;; ; Function Attrs: nounwind
+;; define fastcc <256 x i1> @fcmp_false_sv_v256f32(float %x, <256 x float> %y) {
+;;   %xins = insertelement <256 x float> undef, float %x, i32 0
+;;   %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+;;   %z = fcmp false <256 x float> %vx, %y
+;;   ret <256 x i1> %z
+;; }
+;;
+;; ; Function Attrs: nounwind
+;; define fastcc <256 x i1> @fcmp_false_vs_v256f32(<256 x float> %x, float %y) {
+;;   %yins = insertelement <256 x float> undef, float %y, i32 0
+;;   %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+;;   %z = fcmp false <256 x float> %x, %vy
+;;   ret <256 x i1> %z
+;; }
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_oeq_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_oeq_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.eq %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp oeq <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_oeq_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_oeq_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.eq %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp oeq <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_oeq_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_oeq_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.eq %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp oeq <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ogt_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ogt_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp ogt <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ogt_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ogt_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ogt <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ogt_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_ogt_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ogt <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_oge_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_oge_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp oge <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_oge_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_oge_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp oge <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_oge_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_oge_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp oge <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_olt_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_olt_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp olt <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_olt_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_olt_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp olt <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_olt_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_olt_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp olt <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ole_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ole_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp ole <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ole_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ole_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ole <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ole_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_ole_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ole <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_one_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_one_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.ne %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp one <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_one_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_one_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.ne %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp one <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_one_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_one_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.ne %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp one <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ord_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ord_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.num %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp ord <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ord_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ord_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.num %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ord <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ord_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_ord_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.num %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ord <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_uno_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_uno_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.nan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp uno <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_uno_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_uno_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.nan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp uno <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_uno_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_uno_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.nan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp uno <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ueq_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ueq_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.eqnan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp ueq <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ueq_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ueq_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.eqnan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ueq <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ueq_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_ueq_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.eqnan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ueq <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ugt_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ugt_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.gtnan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp ugt <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ugt_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ugt_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.gtnan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ugt <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ugt_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_ugt_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.gtnan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ugt <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_uge_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_uge_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.genan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp uge <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_uge_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_uge_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.genan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp uge <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_uge_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_uge_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.genan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp uge <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ult_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ult_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.ltnan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp ult <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ult_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ult_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.ltnan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ult <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ult_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_ult_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.ltnan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ult <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ule_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ule_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.lenan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp ule <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ule_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_ule_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.lenan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ule <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_ule_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_ule_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.lenan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp ule <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_une_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_une_vv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.nenan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = fcmp une <256 x float> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_une_sv_v256f32(float %x, <256 x float> %y) {
+; CHECK-LABEL: fcmp_une_sv_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.s.nenan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x float> undef, float %x, i32 0
+  %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp une <256 x float> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @fcmp_une_vs_v256f32(<256 x float> %x, float %y) {
+; CHECK-LABEL: fcmp_une_vs_v256f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vfcmp.s %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.s.nenan %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x float> undef, float %y, i32 0
+  %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+  %z = fcmp une <256 x float> %x, %vy
+  ret <256 x i1> %z
+}
+
+;; TODO v256i1 all-one mask isel.
+;; ; Function Attrs: nounwind
+;; define fastcc <256 x i1> @fcmp_true_vv_v256f32(<256 x float> %x, <256 x float> %y) {
+;;   %z = fcmp true <256 x float> %x, %y
+;;   ret <256 x i1> %z
+;; }
+;;
+;; ; Function Attrs: nounwind
+;; define fastcc <256 x i1> @fcmp_true_sv_v256f32(float %x, <256 x float> %y) {
+;;   %xins = insertelement <256 x float> undef, float %x, i32 0
+;;   %vx = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
+;;   %z = fcmp true <256 x float> %vx, %y
+;;   ret <256 x i1> %z
+;; }
+;;
+;; ; Function Attrs: nounwind
+;; define fastcc <256 x i1> @fcmp_true_vs_v256f32(<256 x float> %x, float %y) {
+;;   %yins = insertelement <256 x float> undef, float %y, i32 0
+;;   %vy = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
+;;   %z = fcmp true <256 x float> %x, %vy
+;;   ret <256 x i1> %z
+;; }

diff --git a/llvm/test/CodeGen/VE/Vector/vec_icmp.ll b/llvm/test/CodeGen/VE/Vector/vec_icmp.ll
new file mode 100644
index 0000000000000..09e88baed15ff
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Vector/vec_icmp.ll
@@ -0,0 +1,934 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+
+; <256 x i32>
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sgt_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_sgt_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp sgt <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sgt_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_sgt_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp sgt <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sgt_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_sgt_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp sgt <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sge_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_sge_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp sge <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sge_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_sge_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp sge <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sge_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_sge_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp sge <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_eq_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_eq_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.eq %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp eq <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_eq_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_eq_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.eq %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp eq <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_eq_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_eq_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.eq %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp eq <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ne_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_ne_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.ne %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp ne <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ne_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_ne_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.ne %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp ne <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ne_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_ne_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.ne %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp ne <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sle_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_sle_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp sle <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sle_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_sle_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp sle <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sle_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_sle_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp sle <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_slt_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_slt_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp slt <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_slt_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_slt_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp slt <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_slt_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_slt_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.w.zx %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp slt <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+
+
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ugt_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_ugt_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp ugt <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ugt_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_ugt_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp ugt <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ugt_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_ugt_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp ugt <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_uge_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_uge_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp uge <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_uge_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_uge_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp uge <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_uge_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_uge_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp uge <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ule_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_ule_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp ule <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ule_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_ule_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp ule <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ule_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_ule_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp ule <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ult_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_ult_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp ult <256 x i32> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ult_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: icmp_ult_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.w.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp ult <256 x i32> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ult_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: icmp_ult_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.w %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.w.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = icmp ult <256 x i32> %x, %vy
+  ret <256 x i1> %z
+}
+
+
+
+; <256 x i64>
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sgt_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_sgt_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmps.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp sgt <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sgt_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_sgt_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp sgt <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sgt_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_sgt_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp sgt <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sge_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_sge_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmps.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp sge <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sge_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_sge_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp sge <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sge_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_sge_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp sge <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_eq_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_eq_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.eq %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp eq <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_eq_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_eq_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.eq %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp eq <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_eq_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_eq_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.eq %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp eq <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}
+
+define fastcc <256 x i1> @icmp_ne_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_ne_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.ne %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp ne <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ne_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_ne_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.ne %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp ne <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ne_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_ne_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.ne %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp ne <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sle_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_sle_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmps.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp sle <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sle_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_sle_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp sle <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_sle_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_sle_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp sle <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_slt_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_slt_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmps.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp slt <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_slt_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_slt_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp slt <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_slt_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_slt_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmps.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp slt <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}
+
+
+
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ugt_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_ugt_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp ugt <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ugt_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_ugt_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp ugt <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ugt_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_ugt_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.gt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp ugt <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_uge_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_uge_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp uge <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_uge_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_uge_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp uge <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_uge_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_uge_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.ge %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp uge <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ule_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_ule_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp ule <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ule_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_ule_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp ule <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ule_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_ule_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.le %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp ule <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ult_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_ult_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = icmp ult <256 x i64> %x, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ult_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: icmp_ult_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v1, %v0
+; CHECK-NEXT:    vfmk.l.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp ult <256 x i64> %vx, %y
+  ret <256 x i1> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i1> @icmp_ult_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: icmp_ult_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vbrd %v1, %s0
+; CHECK-NEXT:    vcmpu.l %v0, %v0, %v1
+; CHECK-NEXT:    vfmk.l.lt %vm1, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = icmp ult <256 x i64> %x, %vy
+  ret <256 x i1> %z
+}


        


More information about the llvm-commits mailing list