[llvm] ffdda49 - [Target][ARM] Add PerformVSELECTCombine for MVE Integer Ops

via llvm-commits llvm-commits at lists.llvm.org
Tue May 5 02:04:22 PDT 2020


Author: Pierre-vh
Date: 2020-05-05T10:03:02+01:00
New Revision: ffdda495f79a3147c4b351b323235ec429cc4f7d

URL: https://github.com/llvm/llvm-project/commit/ffdda495f79a3147c4b351b323235ec429cc4f7d
DIFF: https://github.com/llvm/llvm-project/commit/ffdda495f79a3147c4b351b323235ec429cc4f7d.diff

LOG: [Target][ARM] Add PerformVSELECTCombine for MVE Integer Ops

This patch adds an implementation of PerformVSELECTCombine in the
ARM DAG Combiner that transforms vselect(not(cond), lhs, rhs) into
vselect(cond, rhs, lhs).

Normally, this should be done by the target-independent DAG Combiner,
but it doesn't handle the kind of constants that we generate, so we
have to reimplement it here.
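
For illustration only, here is a minimal IR sketch of the kind of input that
benefits (loosely modeled on the cmpeqz_v4i1 test in mve-pred-or.ll; the
function and value names below are hypothetical, not taken from the tests):

  define arm_aapcs_vfpcc <4 x i32> @or_of_cmps(<4 x i32> %a, <4 x i32> %b) {
  entry:
    ; Two predicate vectors combined with an i1-vector OR that feeds a select.
    %c1 = icmp eq <4 x i32> %a, zeroinitializer
    %c2 = icmp eq <4 x i32> %b, zeroinitializer
    %o = or <4 x i1> %c1, %c2
    %s = select <4 x i1> %o, <4 x i32> %a, <4 x i32> %b
    ret <4 x i32> %s
  }

  ; For MVE, an OR of predicates like %o can be lowered as
  ; NOT(AND(NOT(%c1), NOT(%c2))), so the select reaches the DAG as a VSELECT
  ; whose condition is a XOR with a splat of 1. Folding that XOR by swapping
  ; the VSELECT operands lets the backend drop the trailing VPNOT and emit a
  ; single VPSEL, as the updated CHECK lines below show.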

Differential Revision: https://reviews.llvm.org/D77712

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/test/CodeGen/Thumb2/mve-pred-or.ll
    llvm/test/CodeGen/Thumb2/mve-vcmpf.ll
    llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll
    llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 3d6841ed7ce4..f484da1d44f9 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1460,6 +1460,9 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
   setTargetDAGCombine(ISD::OR);
   setTargetDAGCombine(ISD::XOR);
 
+  if (Subtarget->hasMVEIntegerOps())
+    setTargetDAGCombine(ISD::VSELECT);
+
   if (Subtarget->hasV6Ops())
     setTargetDAGCombine(ISD::SRL);
   if (Subtarget->isThumb1Only())
@@ -11719,6 +11722,42 @@ static SDValue PerformAddeSubeCombine(SDNode *N,
   return SDValue();
 }
 
+static SDValue PerformVSELECTCombine(SDNode *N,
+                                     TargetLowering::DAGCombinerInfo &DCI,
+                                     const ARMSubtarget *Subtarget) {
+  // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs).
+  //
+  // We need to re-implement this optimization here as the implementation in the
+  // Target-Independent DAGCombiner does not handle the kind of constant we make
+  // (it calls isConstOrConstSplat with AllowTruncation set to false - and for
+  // good reason, allowing truncation there would break other targets).
+  //
+  // Currently, this is only done for MVE, as it's the only target that benefits
+  // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL).
+  if (!Subtarget->hasMVEIntegerOps())
+    return SDValue();
+
+  if (N->getOperand(0).getOpcode() != ISD::XOR)
+    return SDValue();
+  SDValue XOR = N->getOperand(0);
+
+  // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s.
+  // It is important to check with truncation allowed as the BUILD_VECTORs we
+  // generate in those situations will truncate their operands.
+  ConstantSDNode *Const =
+      isConstOrConstSplat(XOR->getOperand(1), /*AllowUndefs*/ false,
+                          /*AllowTruncation*/ true);
+  if (!Const || !Const->isOne())
+    return SDValue();
+
+  // Rewrite into vselect(cond, rhs, lhs).
+  SDValue Cond = XOR->getOperand(0);
+  SDValue LHS = N->getOperand(1);
+  SDValue RHS = N->getOperand(2);
+  EVT Type = N->getValueType(0);
+  return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), Type, Cond, RHS, LHS);
+}
+
 static SDValue PerformABSCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {
@@ -15225,6 +15264,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
   switch (N->getOpcode()) {
   default: break;
+  case ISD::VSELECT:    return PerformVSELECTCombine(N, DCI, Subtarget);
   case ISD::ABS:        return PerformABSCombine(N, DCI, Subtarget);
   case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
   case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);

diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-or.ll b/llvm/test/CodeGen/Thumb2/mve-pred-or.ll
index 4e9e074083f5..f93cc66b5025 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-or.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-or.ll
@@ -6,8 +6,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpeqz_v4i1(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.i32 ne, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -22,8 +21,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpnez_v4i1(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.i32 eq, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -38,8 +36,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpsltz_v4i1(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.s32 ge, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -54,8 +51,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpsgtz_v4i1(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.s32 le, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -70,8 +66,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpslez_v4i1(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.s32 gt, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -86,8 +81,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpsgez_v4i1(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.s32 lt, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -116,8 +110,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpugtz_v4i1(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.i32 eq, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -165,8 +158,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpeq_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i3
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.i32 ne, q1, q2
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -181,8 +173,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpne_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i3
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.i32 eq, q1, q2
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -197,8 +188,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpslt_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.s32 le, q2, q1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -213,8 +203,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpsgt_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.s32 le, q1, q2
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -229,8 +218,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpsle_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.s32 lt, q2, q1
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -245,8 +233,7 @@ define arm_aapcs_vfpcc <4 x i32> @cmpsge_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i32 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.s32 lt, q1, q2
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -340,8 +327,7 @@ define arm_aapcs_vfpcc <8 x i16> @cmpeqz_v8i1(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i16 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.i16 ne, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <8 x i16> %a, zeroinitializer
@@ -356,8 +342,7 @@ define arm_aapcs_vfpcc <8 x i16> @cmpeq_v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i1
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i16 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.i16 ne, q1, q2
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <8 x i16> %a, zeroinitializer
@@ -373,8 +358,7 @@ define arm_aapcs_vfpcc <16 x i8> @cmpeqz_v16i1(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i8 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.i8 ne, q1, zr
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <16 x i8> %a, zeroinitializer
@@ -389,8 +373,7 @@ define arm_aapcs_vfpcc <16 x i8> @cmpeq_v16i1(<16 x i8> %a, <16 x i8> %b, <16 x
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vpt.i8 ne, q0, zr
 ; CHECK-NEXT:    vcmpt.i8 ne, q1, q2
-; CHECK-NEXT:    vpnot
-; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vpsel q0, q1, q0
 ; CHECK-NEXT:    bx lr
 entry:
   %c1 = icmp eq <16 x i8> %a, zeroinitializer

diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmpf.ll b/llvm/test/CodeGen/Thumb2/mve-vcmpf.ll
index 7c60eeadbfb0..f9e9a46e638d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vcmpf.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vcmpf.ll
@@ -109,8 +109,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_one_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f32 le, q1, q0
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 le, q0, q1
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp one <4 x float> %src, %src2
@@ -485,8 +484,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP-LABEL: vcmp_ugt_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q1, q0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ugt <4 x float> %src, %src2
@@ -538,8 +536,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP-LABEL: vcmp_uge_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q1, q0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp uge <4 x float> %src, %src2
@@ -591,8 +588,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP-LABEL: vcmp_ult_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q0, q1
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ult <4 x float> %src, %src2
@@ -644,8 +640,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP-LABEL: vcmp_ule_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q0, q1
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ule <4 x float> %src, %src2
@@ -698,8 +693,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f32 le, q1, q0
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 lt, q0, q1
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ord <4 x float> %src, %src2
@@ -1019,8 +1013,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_one_v8f16(<8 x half> %src, <8 x half> %s
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f16 le, q1, q0
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 le, q0, q1
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp one <8 x half> %src, %src2
@@ -1905,8 +1898,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %s
 ; CHECK-MVEFP-LABEL: vcmp_ugt_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q1, q0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ugt <8 x half> %src, %src2
@@ -2030,8 +2022,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %s
 ; CHECK-MVEFP-LABEL: vcmp_uge_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q1, q0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp uge <8 x half> %src, %src2
@@ -2155,8 +2146,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %s
 ; CHECK-MVEFP-LABEL: vcmp_ult_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q0, q1
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ult <8 x half> %src, %src2
@@ -2280,8 +2270,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %s
 ; CHECK-MVEFP-LABEL: vcmp_ule_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q0, q1
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ule <8 x half> %src, %src2
@@ -2406,8 +2395,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %s
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f16 le, q1, q0
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 lt, q0, q1
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ord <8 x half> %src, %src2

diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll b/llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll
index f03034c42baf..cf9757b86d77 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll
@@ -113,8 +113,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_one_v4f32(<4 x float> %src, float %src2
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vpt.f32 ge, q0, r0
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 le, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -510,8 +509,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, float %src2
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vcmp.f32 le, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -566,8 +564,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, float %src2
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vcmp.f32 lt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -622,8 +619,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, float %src2
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -678,8 +674,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, float %src2
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -735,8 +730,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x float> %src, float %src2
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vpt.f32 ge, q0, r0
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 lt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -1060,8 +1054,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_one_v8f16(<8 x half> %src, half* %src2p,
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vpt.f16 ge, q0, r0
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 le, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -1953,8 +1946,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, half* %src2p,
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vcmp.f16 le, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -2079,8 +2071,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, half* %src2p,
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vcmp.f16 lt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -2205,8 +2196,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, half* %src2p,
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -2331,8 +2321,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, half* %src2p,
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -2458,8 +2447,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, half* %src2p,
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vpt.f16 ge, q0, r0
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 lt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -2710,8 +2698,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_one_v4f32(<4 x float> %src, float %sr
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vpt.f32 le, q0, r0
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 ge, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -3107,8 +3094,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_ugt_v4f32(<4 x float> %src, float %sr
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -3163,8 +3149,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_uge_v4f32(<4 x float> %src, float %sr
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -3219,8 +3204,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_ult_v4f32(<4 x float> %src, float %sr
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vcmp.f32 le, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -3275,8 +3259,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_ule_v4f32(<4 x float> %src, float %sr
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vcmp.f32 lt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -3332,8 +3315,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_ord_v4f32(<4 x float> %src, float %sr
 ; CHECK-MVEFP-NEXT:    vmov r0, s4
 ; CHECK-MVEFP-NEXT:    vpt.f32 le, q0, r0
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 gt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q2, q3
+; CHECK-MVEFP-NEXT:    vpsel q0, q3, q2
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %i = insertelement <4 x float> undef, float %src2, i32 0
@@ -3657,8 +3639,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_one_v8f16(<8 x half> %src, half* %src2
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vpt.f16 le, q0, r0
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 ge, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -4550,8 +4531,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_ugt_v8f16(<8 x half> %src, half* %src2
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -4676,8 +4656,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_uge_v8f16(<8 x half> %src, half* %src2
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -4802,8 +4781,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_ult_v8f16(<8 x half> %src, half* %src2
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vcmp.f16 le, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -4928,8 +4906,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_ule_v8f16(<8 x half> %src, half* %src2
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vcmp.f16 lt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p
@@ -5055,8 +5032,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_ord_v8f16(<8 x half> %src, half* %src2
 ; CHECK-MVEFP-NEXT:    ldrh r0, [r0]
 ; CHECK-MVEFP-NEXT:    vpt.f16 le, q0, r0
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 gt, q0, r0
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %src2 = load half, half* %src2p

diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll b/llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll
index 20837421e971..574be119908d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll
@@ -109,8 +109,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_one_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f32 ge, q0, zr
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 le, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp one <4 x float> %src, zeroinitializer
@@ -485,8 +484,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP-LABEL: vcmp_ugt_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 le, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ugt <4 x float> %src, zeroinitializer
@@ -538,8 +536,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP-LABEL: vcmp_uge_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 lt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp uge <4 x float> %src, zeroinitializer
@@ -591,8 +588,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP-LABEL: vcmp_ult_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ult <4 x float> %src, zeroinitializer
@@ -644,8 +640,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP-LABEL: vcmp_ule_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ule <4 x float> %src, zeroinitializer
@@ -698,8 +693,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x float> %src, <4 x float>
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f32 ge, q0, zr
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 lt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ord <4 x float> %src, zeroinitializer
@@ -1011,8 +1005,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_one_v8f16(<8 x half> %src, <8 x half> %a
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f16 ge, q0, zr
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 le, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp one <8 x half> %src, zeroinitializer
@@ -1869,8 +1862,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %a
 ; CHECK-MVEFP-LABEL: vcmp_ugt_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 le, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ugt <8 x half> %src, zeroinitializer
@@ -1990,8 +1982,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %a
 ; CHECK-MVEFP-LABEL: vcmp_uge_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 lt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp uge <8 x half> %src, zeroinitializer
@@ -2111,8 +2102,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %a
 ; CHECK-MVEFP-LABEL: vcmp_ult_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ult <8 x half> %src, zeroinitializer
@@ -2232,8 +2222,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %a
 ; CHECK-MVEFP-LABEL: vcmp_ule_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ule <8 x half> %src, zeroinitializer
@@ -2354,8 +2343,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %a
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f16 ge, q0, zr
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 lt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ord <8 x half> %src, zeroinitializer
@@ -2594,8 +2582,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_one_v4f32(<4 x float> %src, <4 x floa
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f32 le, q0, zr
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 ge, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp one <4 x float> zeroinitializer, %src
@@ -2970,8 +2957,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_ugt_v4f32(<4 x float> %src, <4 x floa
 ; CHECK-MVEFP-LABEL: vcmp_r_ugt_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 ge, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ugt <4 x float> zeroinitializer, %src
@@ -3023,8 +3009,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_uge_v4f32(<4 x float> %src, <4 x floa
 ; CHECK-MVEFP-LABEL: vcmp_r_uge_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 gt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp uge <4 x float> zeroinitializer, %src
@@ -3076,8 +3061,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_ult_v4f32(<4 x float> %src, <4 x floa
 ; CHECK-MVEFP-LABEL: vcmp_r_ult_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 le, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ult <4 x float> zeroinitializer, %src
@@ -3129,8 +3113,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_ule_v4f32(<4 x float> %src, <4 x floa
 ; CHECK-MVEFP-LABEL: vcmp_r_ule_v4f32:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f32 lt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ule <4 x float> zeroinitializer, %src
@@ -3183,8 +3166,7 @@ define arm_aapcs_vfpcc <4 x float> @vcmp_r_ord_v4f32(<4 x float> %src, <4 x floa
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f32 le, q0, zr
 ; CHECK-MVEFP-NEXT:    vcmpt.f32 gt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ord <4 x float> zeroinitializer, %src
@@ -3496,8 +3478,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_one_v8f16(<8 x half> %src, <8 x half>
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f16 le, q0, zr
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 ge, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp one <8 x half> zeroinitializer, %src
@@ -4354,8 +4335,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_ugt_v8f16(<8 x half> %src, <8 x half>
 ; CHECK-MVEFP-LABEL: vcmp_r_ugt_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 ge, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ugt <8 x half> zeroinitializer, %src
@@ -4475,8 +4455,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_uge_v8f16(<8 x half> %src, <8 x half>
 ; CHECK-MVEFP-LABEL: vcmp_r_uge_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 gt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp uge <8 x half> zeroinitializer, %src
@@ -4596,8 +4575,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_ult_v8f16(<8 x half> %src, <8 x half>
 ; CHECK-MVEFP-LABEL: vcmp_r_ult_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 le, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ult <8 x half> zeroinitializer, %src
@@ -4717,8 +4695,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_ule_v8f16(<8 x half> %src, <8 x half>
 ; CHECK-MVEFP-LABEL: vcmp_r_ule_v8f16:
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vcmp.f16 lt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ule <8 x half> zeroinitializer, %src
@@ -4839,8 +4816,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_r_ord_v8f16(<8 x half> %src, <8 x half>
 ; CHECK-MVEFP:       @ %bb.0: @ %entry
 ; CHECK-MVEFP-NEXT:    vpt.f16 le, q0, zr
 ; CHECK-MVEFP-NEXT:    vcmpt.f16 gt, q0, zr
-; CHECK-MVEFP-NEXT:    vpnot
-; CHECK-MVEFP-NEXT:    vpsel q0, q1, q2
+; CHECK-MVEFP-NEXT:    vpsel q0, q2, q1
 ; CHECK-MVEFP-NEXT:    bx lr
 entry:
   %c = fcmp ord <8 x half> zeroinitializer, %src
