[llvm] [ARM] Port shouldBeAdjustedToZero to ARM (PR #147565)

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jul 8 12:53:16 PDT 2025


https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/147565

From b9fcf263cc2d4c751bd1d3185201168b5c617ef1 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Tue, 8 Jul 2025 12:45:49 -0400
Subject: [PATCH] [ARM] Port shouldBeAdjustedToZero to ARM

---
 llvm/lib/Target/ARM/ARMISelLowering.cpp       |  35 +-
 .../CodeGen/ARM/arm-shrink-wrapping-linux.ll  |  34 +-
 llvm/test/CodeGen/ARM/consthoist-icmpimm.ll   | 144 ++--
 llvm/test/CodeGen/ARM/select-constant-xor.ll  |  14 +-
 llvm/test/CodeGen/ARM/sub-cmp-peephole.ll     |   5 +-
 llvm/test/CodeGen/ARM/vsel.ll                 | 775 +++++++++++++-----
 llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll    |   6 +-
 .../CodeGen/Thumb2/pacbti-m-outliner-4.ll     |   4 +-
 8 files changed, 712 insertions(+), 305 deletions(-)

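A minimal sketch of the rewrite this patch enables, for readers skimming the
diff (the IR function below is hypothetical; the before/after assembly mirrors
the deltas in consthoist-icmpimm.ll and sub-cmp-peephole.ll further down):

    ; Hypothetical "is non-negative" test written as a compare against -1.
    define i32 @sgt_m1(i32 %x, i32 %a, i32 %b) {
      %t = icmp sgt i32 %x, -1          ; x > -1 is the same as x >= 0
      %r = select i1 %t, i32 %a, i32 %b
      ret i32 %r
    }

    ; Before: the -1 immediate needs a CMN (or cmp.w #-1 in Thumb2) and GT.
    ;     cmn   r0, #1
    ;     movgt r1, r2
    ; After: shouldBeAdjustedToZero rewrites the constant to 0 and the
    ; condition to SETGE, so a plain CMP with GE (or PL for branches) suffices.
    ;     cmp   r0, #0
    ;     movge r1, r2

The x <= -1 / x > -1 cases are adjusted unconditionally, since lt/ge against
zero only need the N flag (MI/PL); the x < 1 / x >= 1 cases are adjusted only
when the LHS is an ADD or SUB. Comparing against zero also lets the peephole
optimizer fold the compare into a flag-setting ADDS/SUBS when the operand is
an add/sub, as in the sub-cmp-peephole.ll change below.
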
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 6b85e62d2eb8b..933cf905da686 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -4839,14 +4839,45 @@ static bool isFloatingPointZero(SDValue Op) {
   return false;
 }
 
+static bool shouldBeAdjustedToZero(SDValue LHS, APInt C, ISD::CondCode &CC) {
+  // Against zero, setlt and setge lower to the MI and PL condition codes, so
+  // folding x <= -1 into x < 0 (and x > -1 into x >= 0) is always safe.
+  if (C.isAllOnes() && (CC == ISD::SETLE || CC == ISD::SETGT)) {
+    CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
+    return true;
+  }
+
+  // On ARM, ADDS and SUBS set all the NZCV flags (including V), so when the
+  // LHS is an add/sub the compare can be folded into a single ADDS/SUBS.
+  switch (LHS.getOpcode()) {
+  case ISD::ADD:
+  case ISD::SUB:
+    break;
+  default:
+    return false;
+  }
+
+  if (C.isOne() && (CC == ISD::SETLT || CC == ISD::SETGE)) {
+    CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
+    return true;
+  }
+
+  return false;
+}
+
 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
 /// the given operands.
 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                      SDValue &ARMcc, SelectionDAG &DAG,
                                      const SDLoc &dl) const {
   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
-    unsigned C = RHSC->getZExtValue();
-    if (!isLegalICmpImmediate((int32_t)C)) {
+    APInt CInt = RHSC->getAPIntValue();
+    unsigned C = CInt.getZExtValue();
+    if (shouldBeAdjustedToZero(LHS, CInt, CC)) {
+      // Adjust the constant to zero.
+      // CC has already been adjusted.
+      RHS = DAG.getConstant(0, dl, MVT::i32);
+    } else if (!isLegalICmpImmediate((int32_t)C)) {
       // Constant does not fit, try adjusting it by one.
       switch (CC) {
       default: break;
diff --git a/llvm/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll b/llvm/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll
index b92f03d43bb4c..a33c5ed6cdfaa 100644
--- a/llvm/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll
+++ b/llvm/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll
@@ -19,10 +19,9 @@ define fastcc ptr @wrongUseOfPostDominate(ptr readonly %s, i32 %off, ptr readnon
 ; ENABLE:       @ %bb.0: @ %entry
 ; ENABLE-NEXT:    .save {r11, lr}
 ; ENABLE-NEXT:    push {r11, lr}
-; ENABLE-NEXT:    cmn r1, #1
-; ENABLE-NEXT:    ble .LBB0_7
-; ENABLE-NEXT:  @ %bb.1: @ %while.cond.preheader
 ; ENABLE-NEXT:    cmp r1, #0
+; ENABLE-NEXT:    bmi .LBB0_7
+; ENABLE-NEXT:  @ %bb.1: @ %while.cond.preheader
 ; ENABLE-NEXT:    beq .LBB0_6
 ; ENABLE-NEXT:  @ %bb.2: @ %while.cond.preheader
 ; ENABLE-NEXT:    cmp r0, r2
@@ -66,16 +65,16 @@ define fastcc ptr @wrongUseOfPostDominate(ptr readonly %s, i32 %off, ptr readnon
 ; ENABLE-NEXT:    mov r0, r3
 ; ENABLE-NEXT:    ldrb r12, [r0, #-1]!
 ; ENABLE-NEXT:    sxtb lr, r12
-; ENABLE-NEXT:    cmn lr, #1
-; ENABLE-NEXT:    bgt .LBB0_7
+; ENABLE-NEXT:    cmp lr, #0
+; ENABLE-NEXT:    bpl .LBB0_7
 ; ENABLE-NEXT:  @ %bb.11: @ %if.then7
 ; ENABLE-NEXT:    @ in Loop: Header=BB0_7 Depth=1
 ; ENABLE-NEXT:    cmp r0, r2
 ; ENABLE-NEXT:    bls .LBB0_7
 ; ENABLE-NEXT:  @ %bb.12: @ %land.rhs14.preheader
 ; ENABLE-NEXT:    @ in Loop: Header=BB0_7 Depth=1
-; ENABLE-NEXT:    cmn lr, #1
-; ENABLE-NEXT:    bgt .LBB0_7
+; ENABLE-NEXT:    cmp lr, #0
+; ENABLE-NEXT:    bpl .LBB0_7
 ; ENABLE-NEXT:  @ %bb.13: @ %land.rhs14.preheader
 ; ENABLE-NEXT:    @ in Loop: Header=BB0_7 Depth=1
 ; ENABLE-NEXT:    cmp r12, #191
@@ -93,9 +92,9 @@ define fastcc ptr @wrongUseOfPostDominate(ptr readonly %s, i32 %off, ptr readnon
 ; ENABLE-NEXT:    @ in Loop: Header=BB0_15 Depth=2
 ; ENABLE-NEXT:    mov r3, r0
 ; ENABLE-NEXT:    ldrsb lr, [r3], #-1
-; ENABLE-NEXT:    cmn lr, #1
+; ENABLE-NEXT:    cmp lr, #0
 ; ENABLE-NEXT:    uxtb r12, lr
-; ENABLE-NEXT:    bgt .LBB0_7
+; ENABLE-NEXT:    bpl .LBB0_7
 ; ENABLE-NEXT:  @ %bb.17: @ %while.body24.land.rhs14_crit_edge
 ; ENABLE-NEXT:    @ in Loop: Header=BB0_15 Depth=2
 ; ENABLE-NEXT:    cmp r12, #192
@@ -109,10 +108,9 @@ define fastcc ptr @wrongUseOfPostDominate(ptr readonly %s, i32 %off, ptr readnon
 ; DISABLE:       @ %bb.0: @ %entry
 ; DISABLE-NEXT:    .save {r11, lr}
 ; DISABLE-NEXT:    push {r11, lr}
-; DISABLE-NEXT:    cmn r1, #1
-; DISABLE-NEXT:    ble .LBB0_7
-; DISABLE-NEXT:  @ %bb.1: @ %while.cond.preheader
 ; DISABLE-NEXT:    cmp r1, #0
+; DISABLE-NEXT:    bmi .LBB0_7
+; DISABLE-NEXT:  @ %bb.1: @ %while.cond.preheader
 ; DISABLE-NEXT:    beq .LBB0_6
 ; DISABLE-NEXT:  @ %bb.2: @ %while.cond.preheader
 ; DISABLE-NEXT:    cmp r0, r2
@@ -156,16 +154,16 @@ define fastcc ptr @wrongUseOfPostDominate(ptr readonly %s, i32 %off, ptr readnon
 ; DISABLE-NEXT:    mov r0, r3
 ; DISABLE-NEXT:    ldrb r12, [r0, #-1]!
 ; DISABLE-NEXT:    sxtb lr, r12
-; DISABLE-NEXT:    cmn lr, #1
-; DISABLE-NEXT:    bgt .LBB0_7
+; DISABLE-NEXT:    cmp lr, #0
+; DISABLE-NEXT:    bpl .LBB0_7
 ; DISABLE-NEXT:  @ %bb.11: @ %if.then7
 ; DISABLE-NEXT:    @ in Loop: Header=BB0_7 Depth=1
 ; DISABLE-NEXT:    cmp r0, r2
 ; DISABLE-NEXT:    bls .LBB0_7
 ; DISABLE-NEXT:  @ %bb.12: @ %land.rhs14.preheader
 ; DISABLE-NEXT:    @ in Loop: Header=BB0_7 Depth=1
-; DISABLE-NEXT:    cmn lr, #1
-; DISABLE-NEXT:    bgt .LBB0_7
+; DISABLE-NEXT:    cmp lr, #0
+; DISABLE-NEXT:    bpl .LBB0_7
 ; DISABLE-NEXT:  @ %bb.13: @ %land.rhs14.preheader
 ; DISABLE-NEXT:    @ in Loop: Header=BB0_7 Depth=1
 ; DISABLE-NEXT:    cmp r12, #191
@@ -183,9 +181,9 @@ define fastcc ptr @wrongUseOfPostDominate(ptr readonly %s, i32 %off, ptr readnon
 ; DISABLE-NEXT:    @ in Loop: Header=BB0_15 Depth=2
 ; DISABLE-NEXT:    mov r3, r0
 ; DISABLE-NEXT:    ldrsb lr, [r3], #-1
-; DISABLE-NEXT:    cmn lr, #1
+; DISABLE-NEXT:    cmp lr, #0
 ; DISABLE-NEXT:    uxtb r12, lr
-; DISABLE-NEXT:    bgt .LBB0_7
+; DISABLE-NEXT:    bpl .LBB0_7
 ; DISABLE-NEXT:  @ %bb.17: @ %while.body24.land.rhs14_crit_edge
 ; DISABLE-NEXT:    @ in Loop: Header=BB0_15 Depth=2
 ; DISABLE-NEXT:    cmp r12, #192
diff --git a/llvm/test/CodeGen/ARM/consthoist-icmpimm.ll b/llvm/test/CodeGen/ARM/consthoist-icmpimm.ll
index 16b7403bdb932..2f9a8ed997feb 100644
--- a/llvm/test/CodeGen/ARM/consthoist-icmpimm.ll
+++ b/llvm/test/CodeGen/ARM/consthoist-icmpimm.ll
@@ -44,19 +44,19 @@ define i32 @icmp64_sge_0(i64 %x, i64 %y, i32 %a, i32 %b, i1 %c) {
 ; CHECKV7M-NEXT:    ldrd r2, r0, [sp]
 ; CHECKV7M-NEXT:    beq .LBB0_2
 ; CHECKV7M-NEXT:  @ %bb.1: @ %then
-; CHECKV7M-NEXT:    cmp.w r3, #-1
+; CHECKV7M-NEXT:    cmp r3, #0
 ; CHECKV7M-NEXT:    mov r3, r0
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r3, r2
-; CHECKV7M-NEXT:    cmp.w r1, #-1
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r0, r2
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r3, r2
+; CHECKV7M-NEXT:    cmp r1, #0
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r0, r2
 ; CHECKV7M-NEXT:    add r0, r3
 ; CHECKV7M-NEXT:    bx lr
 ; CHECKV7M-NEXT:  .LBB0_2: @ %else
-; CHECKV7M-NEXT:    cmp.w r1, #-1
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r0, r2
+; CHECKV7M-NEXT:    cmp r1, #0
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r0, r2
 ; CHECKV7M-NEXT:    bx lr
 ;
 ; CHECKV7A-LABEL: icmp64_sge_0:
@@ -66,19 +66,19 @@ define i32 @icmp64_sge_0(i64 %x, i64 %y, i32 %a, i32 %b, i1 %c) {
 ; CHECKV7A-NEXT:    lsls r2, r2, #31
 ; CHECKV7A-NEXT:    beq .LBB0_2
 ; CHECKV7A-NEXT:  @ %bb.1: @ %then
-; CHECKV7A-NEXT:    cmp.w r3, #-1
+; CHECKV7A-NEXT:    cmp r3, #0
 ; CHECKV7A-NEXT:    mov r2, r0
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r2, r12
-; CHECKV7A-NEXT:    cmp.w r1, #-1
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r0, r12
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r2, r12
+; CHECKV7A-NEXT:    cmp r1, #0
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r0, r12
 ; CHECKV7A-NEXT:    add r0, r2
 ; CHECKV7A-NEXT:    bx lr
 ; CHECKV7A-NEXT:  .LBB0_2: @ %else
-; CHECKV7A-NEXT:    cmp.w r1, #-1
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r0, r12
+; CHECKV7A-NEXT:    cmp r1, #0
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r0, r12
 ; CHECKV7A-NEXT:    bx lr
   br i1 %c, label %then, label %else
 then:
@@ -135,19 +135,19 @@ define i32 @icmp64_sgt_m1(i64 %x, i64 %y, i32 %a, i32 %b, i1 %c) {
 ; CHECKV7M-NEXT:    ldrd r2, r0, [sp]
 ; CHECKV7M-NEXT:    beq .LBB1_2
 ; CHECKV7M-NEXT:  @ %bb.1: @ %then
-; CHECKV7M-NEXT:    cmp.w r3, #-1
+; CHECKV7M-NEXT:    cmp r3, #0
 ; CHECKV7M-NEXT:    mov r3, r0
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r3, r2
-; CHECKV7M-NEXT:    cmp.w r1, #-1
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r0, r2
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r3, r2
+; CHECKV7M-NEXT:    cmp r1, #0
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r0, r2
 ; CHECKV7M-NEXT:    add r0, r3
 ; CHECKV7M-NEXT:    bx lr
 ; CHECKV7M-NEXT:  .LBB1_2: @ %else
-; CHECKV7M-NEXT:    cmp.w r3, #-1
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r0, r2
+; CHECKV7M-NEXT:    cmp r3, #0
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r0, r2
 ; CHECKV7M-NEXT:    bx lr
 ;
 ; CHECKV7A-LABEL: icmp64_sgt_m1:
@@ -157,19 +157,19 @@ define i32 @icmp64_sgt_m1(i64 %x, i64 %y, i32 %a, i32 %b, i1 %c) {
 ; CHECKV7A-NEXT:    lsls r2, r2, #31
 ; CHECKV7A-NEXT:    beq .LBB1_2
 ; CHECKV7A-NEXT:  @ %bb.1: @ %then
-; CHECKV7A-NEXT:    cmp.w r3, #-1
+; CHECKV7A-NEXT:    cmp r3, #0
 ; CHECKV7A-NEXT:    mov r2, r0
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r2, r12
-; CHECKV7A-NEXT:    cmp.w r1, #-1
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r0, r12
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r2, r12
+; CHECKV7A-NEXT:    cmp r1, #0
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r0, r12
 ; CHECKV7A-NEXT:    add r0, r2
 ; CHECKV7A-NEXT:    bx lr
 ; CHECKV7A-NEXT:  .LBB1_2: @ %else
-; CHECKV7A-NEXT:    cmp.w r3, #-1
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r0, r12
+; CHECKV7A-NEXT:    cmp r3, #0
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r0, r12
 ; CHECKV7A-NEXT:    bx lr
   br i1 %c, label %then, label %else
 then:
@@ -227,19 +227,19 @@ define i32 @icmp32_sge_0(i32 %x, i32 %y, i32 %a, i32 %b, i1 %c) {
 ; CHECKV7M-NEXT:    lsls r3, r3, #31
 ; CHECKV7M-NEXT:    beq .LBB2_2
 ; CHECKV7M-NEXT:  @ %bb.1: @ %then
-; CHECKV7M-NEXT:    cmp.w r1, #-1
+; CHECKV7M-NEXT:    cmp r1, #0
 ; CHECKV7M-NEXT:    mov r1, r12
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r1, r2
-; CHECKV7M-NEXT:    cmp.w r0, #-1
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r12, r2
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r1, r2
+; CHECKV7M-NEXT:    cmp r0, #0
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r12, r2
 ; CHECKV7M-NEXT:    add.w r0, r12, r1
 ; CHECKV7M-NEXT:    bx lr
 ; CHECKV7M-NEXT:  .LBB2_2: @ %else
-; CHECKV7M-NEXT:    cmp.w r0, #-1
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r12, r2
+; CHECKV7M-NEXT:    cmp r0, #0
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r12, r2
 ; CHECKV7M-NEXT:    mov r0, r12
 ; CHECKV7M-NEXT:    bx lr
 ;
@@ -250,19 +250,19 @@ define i32 @icmp32_sge_0(i32 %x, i32 %y, i32 %a, i32 %b, i1 %c) {
 ; CHECKV7A-NEXT:    lsls r3, r3, #31
 ; CHECKV7A-NEXT:    beq .LBB2_2
 ; CHECKV7A-NEXT:  @ %bb.1: @ %then
-; CHECKV7A-NEXT:    cmp.w r1, #-1
+; CHECKV7A-NEXT:    cmp r1, #0
 ; CHECKV7A-NEXT:    mov r1, r12
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r1, r2
-; CHECKV7A-NEXT:    cmp.w r0, #-1
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r12, r2
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r1, r2
+; CHECKV7A-NEXT:    cmp r0, #0
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r12, r2
 ; CHECKV7A-NEXT:    add.w r0, r12, r1
 ; CHECKV7A-NEXT:    bx lr
 ; CHECKV7A-NEXT:  .LBB2_2: @ %else
-; CHECKV7A-NEXT:    cmp.w r0, #-1
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r12, r2
+; CHECKV7A-NEXT:    cmp r0, #0
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r12, r2
 ; CHECKV7A-NEXT:    mov r0, r12
 ; CHECKV7A-NEXT:    bx lr
   br i1 %c, label %then, label %else
@@ -321,19 +321,19 @@ define i32 @icmp32_sgt_m1(i32 %x, i32 %y, i32 %a, i32 %b, i1 %c) {
 ; CHECKV7M-NEXT:    lsls r3, r3, #31
 ; CHECKV7M-NEXT:    beq .LBB3_2
 ; CHECKV7M-NEXT:  @ %bb.1: @ %then
-; CHECKV7M-NEXT:    cmp.w r1, #-1
+; CHECKV7M-NEXT:    cmp r1, #0
 ; CHECKV7M-NEXT:    mov r1, r12
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r1, r2
-; CHECKV7M-NEXT:    cmp.w r0, #-1
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r12, r2
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r1, r2
+; CHECKV7M-NEXT:    cmp r0, #0
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r12, r2
 ; CHECKV7M-NEXT:    add.w r0, r12, r1
 ; CHECKV7M-NEXT:    bx lr
 ; CHECKV7M-NEXT:  .LBB3_2: @ %else
-; CHECKV7M-NEXT:    cmp.w r1, #-1
-; CHECKV7M-NEXT:    it gt
-; CHECKV7M-NEXT:    movgt r12, r2
+; CHECKV7M-NEXT:    cmp r1, #0
+; CHECKV7M-NEXT:    it ge
+; CHECKV7M-NEXT:    movge r12, r2
 ; CHECKV7M-NEXT:    mov r0, r12
 ; CHECKV7M-NEXT:    bx lr
 ;
@@ -344,19 +344,19 @@ define i32 @icmp32_sgt_m1(i32 %x, i32 %y, i32 %a, i32 %b, i1 %c) {
 ; CHECKV7A-NEXT:    lsls r3, r3, #31
 ; CHECKV7A-NEXT:    beq .LBB3_2
 ; CHECKV7A-NEXT:  @ %bb.1: @ %then
-; CHECKV7A-NEXT:    cmp.w r1, #-1
+; CHECKV7A-NEXT:    cmp r1, #0
 ; CHECKV7A-NEXT:    mov r1, r12
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r1, r2
-; CHECKV7A-NEXT:    cmp.w r0, #-1
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r12, r2
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r1, r2
+; CHECKV7A-NEXT:    cmp r0, #0
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r12, r2
 ; CHECKV7A-NEXT:    add.w r0, r12, r1
 ; CHECKV7A-NEXT:    bx lr
 ; CHECKV7A-NEXT:  .LBB3_2: @ %else
-; CHECKV7A-NEXT:    cmp.w r1, #-1
-; CHECKV7A-NEXT:    it gt
-; CHECKV7A-NEXT:    movgt r12, r2
+; CHECKV7A-NEXT:    cmp r1, #0
+; CHECKV7A-NEXT:    it ge
+; CHECKV7A-NEXT:    movge r12, r2
 ; CHECKV7A-NEXT:    mov r0, r12
 ; CHECKV7A-NEXT:    bx lr
   br i1 %c, label %then, label %else
diff --git a/llvm/test/CodeGen/ARM/select-constant-xor.ll b/llvm/test/CodeGen/ARM/select-constant-xor.ll
index 543ddcd3efac9..2318418d02bd9 100644
--- a/llvm/test/CodeGen/ARM/select-constant-xor.ll
+++ b/llvm/test/CodeGen/ARM/select-constant-xor.ll
@@ -320,8 +320,8 @@ define i32 @icmpasreq(i32 %input, i32 %a, i32 %b) {
 define i32 @icmpasrne(i32 %input, i32 %a, i32 %b) {
 ; CHECK7A-LABEL: icmpasrne:
 ; CHECK7A:       @ %bb.0:
-; CHECK7A-NEXT:    cmn r0, #1
-; CHECK7A-NEXT:    movle r1, r2
+; CHECK7A-NEXT:    cmp r0, #0
+; CHECK7A-NEXT:    movlt r1, r2
 ; CHECK7A-NEXT:    mov r0, r1
 ; CHECK7A-NEXT:    bx lr
 ;
@@ -337,16 +337,16 @@ define i32 @icmpasrne(i32 %input, i32 %a, i32 %b) {
 ;
 ; CHECK7M-LABEL: icmpasrne:
 ; CHECK7M:       @ %bb.0:
-; CHECK7M-NEXT:    cmp.w r0, #-1
-; CHECK7M-NEXT:    it le
-; CHECK7M-NEXT:    movle r1, r2
+; CHECK7M-NEXT:    cmp r0, #0
+; CHECK7M-NEXT:    it lt
+; CHECK7M-NEXT:    movlt r1, r2
 ; CHECK7M-NEXT:    mov r0, r1
 ; CHECK7M-NEXT:    bx lr
 ;
 ; CHECK81M-LABEL: icmpasrne:
 ; CHECK81M:       @ %bb.0:
-; CHECK81M-NEXT:    cmp.w r0, #-1
-; CHECK81M-NEXT:    csel r0, r1, r2, gt
+; CHECK81M-NEXT:    cmp r0, #0
+; CHECK81M-NEXT:    csel r0, r1, r2, ge
 ; CHECK81M-NEXT:    bx lr
   %sh = ashr i32 %input, 31
   %c = icmp ne i32 %sh, -1
diff --git a/llvm/test/CodeGen/ARM/sub-cmp-peephole.ll b/llvm/test/CodeGen/ARM/sub-cmp-peephole.ll
index fb966c29f39a2..f17b2e6db5fa6 100644
--- a/llvm/test/CodeGen/ARM/sub-cmp-peephole.ll
+++ b/llvm/test/CodeGen/ARM/sub-cmp-peephole.ll
@@ -264,9 +264,8 @@ define i32 @cmp_slt0(i32 %a, i32 %b, i32 %x, i32 %y) {
 ; CHECK-NEXT:    movw r0, :lower16:t
 ; CHECK-NEXT:    movt r0, :upper16:t
 ; CHECK-NEXT:    ldr r0, [r0]
-; CHECK-NEXT:    sub r0, r0, #17
-; CHECK-NEXT:    cmn r0, #1
-; CHECK-NEXT:    ble .LBB11_2
+; CHECK-NEXT:    subs r0, r0, #17
+; CHECK-NEXT:    bmi .LBB11_2
 ; CHECK-NEXT:  @ %bb.1: @ %if.else
 ; CHECK-NEXT:    mov r0, #0
 ; CHECK-NEXT:    bl exit
diff --git a/llvm/test/CodeGen/ARM/vsel.ll b/llvm/test/CodeGen/ARM/vsel.ll
index 272e3361bd470..2f126cc4027a5 100644
--- a/llvm/test/CodeGen/ARM/vsel.ll
+++ b/llvm/test/CodeGen/ARM/vsel.ll
@@ -1,606 +1,985 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc < %s -mtriple=armv8-linux-gnueabihf -mattr=+fp-armv8 -float-abi=hard | FileCheck %s
 @varfloat = global float 0.0
 @vardouble = global double 0.0
 define void @test_vsel32sgt(i32 %lhs32, i32 %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32sgt
+; CHECK-LABEL: test_vsel32sgt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:varfloat
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s0, s1
+; CHECK-NEXT:    vstr s0, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sgt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: cmp r0, r1
-; CHECK: vselgt.f32 s0, s0, s1
   ret void
 }
 define void @test_vsel64sgt(i32 %lhs32, i32 %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64sgt
+; CHECK-LABEL: test_vsel64sgt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:vardouble
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d0, d1
+; CHECK-NEXT:    vstr d16, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sgt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: cmp r0, r1
-; CHECK: vselgt.f64 d16, d0, d1
   ret void
 }
 define void @test_vsel32sge(i32 %lhs32, i32 %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32sge
+; CHECK-LABEL: test_vsel32sge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:varfloat
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s0, s1
+; CHECK-NEXT:    vstr s0, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sge i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: cmp r0, r1
-; CHECK: vselge.f32 s0, s0, s1
   ret void
 }
 define void @test_vsel64sge(i32 %lhs32, i32 %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64sge
+; CHECK-LABEL: test_vsel64sge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:vardouble
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d0, d1
+; CHECK-NEXT:    vstr d16, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sge i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: cmp r0, r1
-; CHECK: vselge.f64 d16, d0, d1
   ret void
 }
 define void @test_vsel32eq(i32 %lhs32, i32 %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32eq
+; CHECK-LABEL: test_vsel32eq:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:varfloat
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:varfloat
+; CHECK-NEXT:    vseleq.f32 s0, s0, s1
+; CHECK-NEXT:    vstr s0, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp eq i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: cmp r0, r1
-; CHECK: vseleq.f32 s0, s0, s1
   ret void
 }
 define void @test_vsel64eq(i32 %lhs32, i32 %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64eq
+; CHECK-LABEL: test_vsel64eq:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:vardouble
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:vardouble
+; CHECK-NEXT:    vseleq.f64 d16, d0, d1
+; CHECK-NEXT:    vstr d16, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp eq i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: cmp r0, r1
-; CHECK: vseleq.f64 d16, d0, d1
   ret void
 }
 define void @test_vsel32slt(i32 %lhs32, i32 %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32slt
+; CHECK-LABEL: test_vsel32slt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:varfloat
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s1, s0
+; CHECK-NEXT:    vstr s0, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp slt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: cmp r0, r1
-; CHECK: vselge.f32 s0, s1, s0
   ret void
 }
 define void @test_vsel64slt(i32 %lhs32, i32 %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64slt
+; CHECK-LABEL: test_vsel64slt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:vardouble
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d1, d0
+; CHECK-NEXT:    vstr d16, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp slt i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: cmp r0, r1
-; CHECK: vselge.f64 d16, d1, d0
   ret void
 }
 define void @test_vsel32sle(i32 %lhs32, i32 %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32sle
+; CHECK-LABEL: test_vsel32sle:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:varfloat
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s1, s0
+; CHECK-NEXT:    vstr s0, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sle i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: cmp r0, r1
-; CHECK: vselgt.f32 s0, s1, s0
   ret void
 }
 define void @test_vsel64sle(i32 %lhs32, i32 %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64sle
+; CHECK-LABEL: test_vsel64sle:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r2, :lower16:vardouble
+; CHECK-NEXT:    cmp r0, r1
+; CHECK-NEXT:    movt r2, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d1, d0
+; CHECK-NEXT:    vstr d16, [r2]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sle i32 %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: cmp r0, r1
-; CHECK: vselgt.f64 d16, d1, d0
   ret void
 }
 define void @test_vsel32ogt(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ogt
+; CHECK-LABEL: test_vsel32ogt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ogt float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselgt.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64ogt(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ogt
+; CHECK-LABEL: test_vsel64ogt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ogt float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselgt.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32oge(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32oge
+; CHECK-LABEL: test_vsel32oge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp oge float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselge.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64oge(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64oge
+; CHECK-LABEL: test_vsel64oge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp oge float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselge.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32oeq(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32oeq
+; CHECK-LABEL: test_vsel32oeq:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vseleq.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp oeq float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vseleq.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64oeq(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64oeq
+; CHECK-LABEL: test_vsel64oeq:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vseleq.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp oeq float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vseleq.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32ugt(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ugt
+; CHECK-LABEL: test_vsel32ugt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s3, s2
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ugt float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselge.f32 s0, s3, s2
   ret void
 }
 define void @test_vsel64ugt(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ugt
+; CHECK-LABEL: test_vsel64ugt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d2, d1
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ugt float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselge.f64 d16, d2, d1
   ret void
 }
 define void @test_vsel32uge(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32uge
+; CHECK-LABEL: test_vsel32uge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s3, s2
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp uge float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselgt.f32 s0, s3, s2
   ret void
 }
 define void @test_vsel64uge(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64uge
+; CHECK-LABEL: test_vsel64uge:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d2, d1
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp uge float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselgt.f64 d16, d2, d1
   ret void
 }
 define void @test_vsel32olt(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32olt
+; CHECK-LABEL: test_vsel32olt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp olt float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselgt.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64olt(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64olt
+; CHECK-LABEL: test_vsel64olt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp olt float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselgt.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32ult(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ult
+; CHECK-LABEL: test_vsel32ult:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s3, s2
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ult float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselge.f32 s0, s3, s2
   ret void
 }
 define void @test_vsel64ult(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ult
+; CHECK-LABEL: test_vsel64ult:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d2, d1
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ult float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselge.f64 d16, d2, d1
   ret void
 }
 define void @test_vsel32ole(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ole
+; CHECK-LABEL: test_vsel32ole:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ole float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselge.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64ole(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ole
+; CHECK-LABEL: test_vsel64ole:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ole float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselge.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32ule(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ule
+; CHECK-LABEL: test_vsel32ule:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s3, s2
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ule float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselgt.f32 s0, s3, s2
   ret void
 }
 define void @test_vsel64ule(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ule
+; CHECK-LABEL: test_vsel64ule:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d2, d1
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ule float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselgt.f64 d16, d2, d1
   ret void
 }
 define void @test_vsel32ord(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ord
+; CHECK-LABEL: test_vsel32ord:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselvs.f32 s0, s3, s2
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ord float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselvs.f32 s0, s3, s2
   ret void
 }
 define void @test_vsel64ord(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ord
+; CHECK-LABEL: test_vsel64ord:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselvs.f64 d16, d2, d1
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp ord float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselvs.f64 d16, d2, d1
   ret void
 }
 define void @test_vsel32une(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32une
+; CHECK-LABEL: test_vsel32une:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vseleq.f32 s0, s3, s2
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp une float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vseleq.f32 s0, s3, s2
   ret void
 }
 define void @test_vsel64une(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64une
+; CHECK-LABEL: test_vsel64une:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vseleq.f64 d16, d2, d1
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp une float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vseleq.f64 d16, d2, d1
   ret void
 }
 define void @test_vsel32uno(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32uno
+; CHECK-LABEL: test_vsel32uno:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselvs.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp uno float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselvs.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64uno(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64uno
+; CHECK-LABEL: test_vsel64uno:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselvs.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp uno float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselvs.f64 d16, d1, d2
   ret void
 }
 
 define void @test_vsel32ogt_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ogt_nnan
+; CHECK-LABEL: test_vsel32ogt_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ogt float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselgt.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64ogt_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ogt_nnan
+; CHECK-LABEL: test_vsel64ogt_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ogt float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselgt.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32oge_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32oge_nnan
+; CHECK-LABEL: test_vsel32oge_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan oge float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselge.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64oge_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64oge_nnan
+; CHECK-LABEL: test_vsel64oge_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan oge float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselge.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32oeq_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32oeq_nnan
+; CHECK-LABEL: test_vsel32oeq_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vseleq.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan oeq float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vseleq.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64oeq_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64oeq_nnan
+; CHECK-LABEL: test_vsel64oeq_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vseleq.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan oeq float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vseleq.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32ugt_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ugt_nnan
+; CHECK-LABEL: test_vsel32ugt_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ugt float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32	s0, s1
-; CHECK: vselgt.f32	s0, s2, s3
   ret void
 }
 define void @test_vsel64ugt_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ugt_nnan
+; CHECK-LABEL: test_vsel64ugt_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ugt float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselgt.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32uge_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32uge_nnan
+; CHECK-LABEL: test_vsel32uge_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan uge float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselge.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64uge_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64uge_nnan
+; CHECK-LABEL: test_vsel64uge_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan uge float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselge.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32olt_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32olt_nnan
+; CHECK-LABEL: test_vsel32olt_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan olt float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselgt.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64olt_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64olt_nnan
+; CHECK-LABEL: test_vsel64olt_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan olt float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselgt.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32ult_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ult_nnan
+; CHECK-LABEL: test_vsel32ult_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ult float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselgt.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64ult_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ult_nnan
+; CHECK-LABEL: test_vsel64ult_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ult float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselgt.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32ole_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ole_nnan
+; CHECK-LABEL: test_vsel32ole_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ole float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselge.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64ole_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ole_nnan
+; CHECK-LABEL: test_vsel64ole_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ole float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselge.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32ule_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ule_nnan
+; CHECK-LABEL: test_vsel32ule_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ule float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselge.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64ule_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ule_nnan
+; CHECK-LABEL: test_vsel64ule_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s1, s0
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ule float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s1, s0
-; CHECK: vselge.f64 d16, d1, d2
   ret void
 }
 define void @test_vsel32ord_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32ord_nnan
+; CHECK-LABEL: test_vsel32ord_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselvs.f32 s0, s3, s2
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ord float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselvs.f32 s0, s3, s2
   ret void
 }
 define void @test_vsel64ord_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64ord_nnan
+; CHECK-LABEL: test_vsel64ord_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselvs.f64 d16, d2, d1
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan ord float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselvs.f64 d16, d2, d1
   ret void
 }
 define void @test_vsel32une_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32une_nnan
+; CHECK-LABEL: test_vsel32une_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vseleq.f32 s0, s3, s2
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan une float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vseleq.f32 s0, s3, s2
   ret void
 }
 define void @test_vsel64une_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64une_nnan
+; CHECK-LABEL: test_vsel64une_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vseleq.f64 d16, d2, d1
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan une float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vseleq.f64 d16, d2, d1
   ret void
 }
 define void @test_vsel32uno_nnan(float %lhs32, float %rhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel32uno_nnan
+; CHECK-LABEL: test_vsel32uno_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:varfloat
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:varfloat
+; CHECK-NEXT:    vselvs.f32 s0, s2, s3
+; CHECK-NEXT:    vstr s0, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan uno float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselvs.f32 s0, s2, s3
   ret void
 }
 define void @test_vsel64uno_nnan(float %lhs32, float %rhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel64uno_nnan
+; CHECK-LABEL: test_vsel64uno_nnan:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vcmp.f32 s0, s1
+; CHECK-NEXT:    movw r0, :lower16:vardouble
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    movt r0, :upper16:vardouble
+; CHECK-NEXT:    vselvs.f64 d16, d1, d2
+; CHECK-NEXT:    vstr d16, [r0]
+; CHECK-NEXT:    bx lr
   %tst1 = fcmp nnan uno float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: vcmp.f32 s0, s1
-; CHECK: vselvs.f64 d16, d1, d2
   ret void
 }
 
 define void @test_vsel_ltzero(i32 %lhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel_ltzero
+; CHECK-LABEL: test_vsel_ltzero:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, :lower16:varfloat
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    movt r1, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s1, s0
+; CHECK-NEXT:    vstr s0, [r1]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp slt i32 %lhs32, 0
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: cmp r0, #0
-; CHECK: vselge.f32 s0, s1, s0
   ret void
 }
 
 define void @test_vsel_lezero(i32 %lhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel_lezero
+; CHECK-LABEL: test_vsel_lezero:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, :lower16:varfloat
+; CHECK-NEXT:    cmp r0, #1
+; CHECK-NEXT:    movt r1, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s1, s0
+; CHECK-NEXT:    vstr s0, [r1]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sle i32 %lhs32, 0
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: cmp r0, #1
-; CHECK: vselge.f32 s0, s1, s0
   ret void
 }
 
 define void @test_vsel_gtzero(i32 %lhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel_gtzero
+; CHECK-LABEL: test_vsel_gtzero:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, :lower16:varfloat
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    movt r1, :upper16:varfloat
+; CHECK-NEXT:    vselgt.f32 s0, s0, s1
+; CHECK-NEXT:    vstr s0, [r1]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sgt i32 %lhs32, 0
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: cmp r0, #0
-; CHECK: vselgt.f32 s0, s0, s1
   ret void
 }
 
 define void @test_vsel_gezero(i32 %lhs32, float %a, float %b) {
-; CHECK-LABEL: test_vsel_gezero
+; CHECK-LABEL: test_vsel_gezero:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, :lower16:varfloat
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    movt r1, :upper16:varfloat
+; CHECK-NEXT:    vselge.f32 s0, s0, s1
+; CHECK-NEXT:    vstr s0, [r1]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sge i32 %lhs32, 0
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, ptr @varfloat
-; CHECK: cmn r0, #1
-; CHECK: vselgt.f32 s0, s0, s1
   ret void
 }
 
 define void @test_vsel_ltzero64(i32 %lhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel_ltzero
+; CHECK-LABEL: test_vsel_ltzero64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, :lower16:vardouble
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    movt r1, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d1, d0
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp slt i32 %lhs32, 0
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: cmp r0, #0
-; CHECK: vselge.f64 d16, d1, d0
   ret void
 }
 
 define void @test_vsel_lezero64(i32 %lhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel_lezero
+; CHECK-LABEL: test_vsel_lezero64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, :lower16:vardouble
+; CHECK-NEXT:    cmp r0, #1
+; CHECK-NEXT:    movt r1, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d1, d0
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sle i32 %lhs32, 0
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: cmp r0, #1
-; CHECK: vselge.f64 d16, d1, d0
   ret void
 }
 
 define void @test_vsel_gtzero64(i32 %lhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel_gtzero
+; CHECK-LABEL: test_vsel_gtzero64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, :lower16:vardouble
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    movt r1, :upper16:vardouble
+; CHECK-NEXT:    vselgt.f64 d16, d0, d1
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sgt i32 %lhs32, 0
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: cmp r0, #0
-; CHECK: vselgt.f64 d16, d0, d1
   ret void
 }
 
 define void @test_vsel_gezero64(i32 %lhs32, double %a, double %b) {
-; CHECK-LABEL: test_vsel_gezero
+; CHECK-LABEL: test_vsel_gezero64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, :lower16:vardouble
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    movt r1, :upper16:vardouble
+; CHECK-NEXT:    vselge.f64 d16, d0, d1
+; CHECK-NEXT:    vstr d16, [r1]
+; CHECK-NEXT:    bx lr
   %tst1 = icmp sge i32 %lhs32, 0
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, ptr @vardouble
-; CHECK: cmn r0, #1
-; CHECK: vselgt.f64 d16, d0, d1
   ret void
 }
diff --git a/llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll b/llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll
index da59cb259db61..af17bb1430634 100644
--- a/llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-memtp-loop.ll
@@ -211,9 +211,9 @@ entry:
 define void @test11(ptr nocapture %x, ptr nocapture %y, i32 %n) {
 ; CHECK-LABEL: test11:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    cmp.w r2, #-1
-; CHECK-NEXT:    it gt
-; CHECK-NEXT:    bxgt lr
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    it pl
+; CHECK-NEXT:    bxpl lr
 ; CHECK-NEXT:  .LBB10_1: @ %prehead
 ; CHECK-NEXT:    .save {r4, lr}
 ; CHECK-NEXT:    push {r4, lr}
diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
index 8777d517c4bad..0a014c76c6cba 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
@@ -40,8 +40,8 @@ define hidden i32 @_Z1hii(i32 %a, i32 %b) local_unnamed_addr #0 {
 ; CHECK-NEXT:    .cfi_offset lr, -4
 ; CHECK-NEXT:    .cfi_offset ra_auth_code, -8
 ; CHECK-NEXT:    .cfi_offset r7, -12
-; CHECK-NEXT:    cmp.w r0, #-1
-; CHECK-NEXT:    ble .LBB0_2
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    bmi .LBB0_2
 ; CHECK-NEXT:  @ %bb.1: @ %if.end
 ; CHECK-NEXT:    add r0, r1
 ; CHECK-NEXT:    pop.w {r3, r7, r12, lr}
