[llvm] [DAG] Refactor X86 combineVSelectWithAllOnesOrZeros fold into a generic DAG Combine (PR #145298)

via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 23 02:37:52 PDT 2025


https://github.com/woruyu created https://github.com/llvm/llvm-project/pull/145298

This PR resolves https://github.com/llvm/llvm-project/issues/144513

The modification covers five patterns:
1. vselect Cond, 0, 0 → 0
2. vselect Cond, -1, 0 → bitcast Cond
3. vselect Cond, -1, x → or Cond, x
4. vselect Cond, x, 0 → and Cond, x
5. vselect Cond, 0, x → andn Cond, x

Patterns 1-4 have been migrated to DAGCombiner; pattern 5 stays in the X86-specific code (see the IR sketch below for pattern 4).
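To make the folds concrete, here is pattern 4 at the IR level (a hand-written sketch with made-up names, not a test from this PR). After type legalization the <4 x i1> condition becomes a v4i32 mask whose lanes are 0 or -1, so the select collapses to a single AND:

define <4 x i32> @sel_x_or_zero(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x) {
  ; The compare legalizes to a v4i32 sign-splat mask (all-zeros/all-ones lanes).
  %c = icmp sgt <4 x i32> %a, %b
  ; vselect Cond, x, 0 -> and Cond, x once the mask width matches the elements.
  %s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
  ret <4 x i32> %s
}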

The reason is that DAGCombiner cannot emit an andn node directly; the only generic way to express the fold is and+xor, and that introduces an ordering problem. In the X86 backend, select Cond, 0, x → (~Cond) & x is formed by first checking whether Cond is a SETCC node; if it is, the backend inverts the comparison's condition code instead of materializing a NOT. If DAGCombiner expanded the select to and+xor first, the SETCC would be hidden behind the xor and the X86 backend could no longer form andn. In short, keeping the vselect Cond, 0, x pattern intact in DAGCombiner, rather than expanding it to and+xor, is the better choice.
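For reference, here is the kind of input where expanding early would hurt (again a hand-written sketch with made-up names):

define <4 x i32> @sel_zero_or_x(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x) {
  ; setcc feeding the select: X86 wants to invert eq -> ne and emit andn.
  %c = icmp eq <4 x i32> %a, %b
  ; vselect Cond, 0, x: left as-is by DAGCombiner so the backend still sees
  ; the setcc; rewriting it here as and(xor(Cond, -1), x) would hide it.
  %s = select <4 x i1> %c, <4 x i32> zeroinitializer, <4 x i32> %x
  ret <4 x i32> %s
}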

The PR has two commits: the first contains the code changes and the X86 test updates; the second updates the tests of the other backends.

From fe9de4b6a50629a319b6215961ab374f9993bb14 Mon Sep 17 00:00:00 2001
From: woruyu <1214539920 at qq.com>
Date: Mon, 23 Jun 2025 16:41:19 +0800
Subject: [PATCH 1/2] feat: move combineVSelectWithAllOnesOrZeros to
 DAGCombiner and x86 test

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 82 +++++++++++++++++++
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 72 ++--------------
 .../X86/urem-seteq-vec-tautological.ll        | 17 ++--
 3 files changed, 98 insertions(+), 73 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 56a5643e13442..0dce13035f33a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12945,6 +12945,85 @@ SDValue DAGCombiner::visitVP_SELECT(SDNode *N) {
   return SDValue();
 }
 
+static SDValue combineVSelectWithAllOnesOrZeros(SDValue Cond, SDValue TVal,
+                                                SDValue FVal,
+                                                const TargetLowering &TLI,
+                                                SelectionDAG &DAG,
+                                                const SDLoc &DL) {
+  if (!TLI.isTypeLegal(TVal.getValueType()))
+    return SDValue();
+
+  EVT VT = TVal.getValueType();
+  EVT CondVT = Cond.getValueType();
+
+  assert(CondVT.isVector() && "Vector select expects a vector selector!");
+
+  // Classify TVal/FVal content
+  bool IsTAllZero = ISD::isBuildVectorAllZeros(TVal.getNode());
+  bool IsTAllOne = ISD::isBuildVectorAllOnes(TVal.getNode());
+  bool IsFAllZero = ISD::isBuildVectorAllZeros(FVal.getNode());
+  bool IsFAllOne = ISD::isBuildVectorAllOnes(FVal.getNode());
+
+  // If no operand is all-zeros or all-ones, there is nothing to fold.
+  if (!(IsTAllZero || IsTAllOne || IsFAllZero || IsFAllOne))
+    return SDValue();
+
+  // select Cond, 0, 0 → 0
+  if (IsTAllZero && IsFAllZero) {
+    return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, DL, VT)
+                                : DAG.getConstant(0, DL, VT);
+  }
+
+  // To use the condition operand as a bitwise mask, it must have elements that
+  // are the same size as the select elements. Ie, the condition operand must
+  // have already been promoted from the IR select condition type <N x i1>.
+  // Don't check if the types themselves are equal because that excludes
+  // vector floating-point selects.
+  if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
+    return SDValue();
+
+  // Try inverting Cond and swapping T/F if it gives all-ones/all-zeros form
+  if (!IsTAllOne && !IsFAllZero && Cond.hasOneUse() &&
+      Cond.getOpcode() == ISD::SETCC &&
+      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
+          CondVT) {
+    if (IsTAllZero || IsFAllOne) {
+      SDValue CC = Cond.getOperand(2);
+      ISD::CondCode InverseCC = ISD::getSetCCInverse(
+          cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
+      Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
+                          InverseCC);
+      std::swap(TVal, FVal);
+      std::swap(IsTAllOne, IsFAllOne);
+      std::swap(IsTAllZero, IsFAllZero);
+    }
+  }
+
+  // Cond value must be 'sign splat' to be converted to a logical op.
+  if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
+    return SDValue();
+
+  // select Cond, -1, 0 → bitcast Cond
+  if (IsTAllOne && IsFAllZero)
+    return DAG.getBitcast(VT, Cond);
+
+  // select Cond, -1, x → or Cond, x
+  if (IsTAllOne) {
+    SDValue X = DAG.getBitcast(CondVT, FVal);
+    SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, X);
+    return DAG.getBitcast(VT, Or);
+  }
+
+  // select Cond, x, 0 → and Cond, x
+  if (IsFAllZero) {
+    SDValue X = DAG.getBitcast(CondVT, TVal);
+    SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, X);
+    return DAG.getBitcast(VT, And);
+  }
+
+  return SDValue();
+}
+
 SDValue DAGCombiner::visitVSELECT(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
@@ -13213,6 +13292,9 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
   if (SimplifyDemandedVectorElts(SDValue(N, 0)))
     return SDValue(N, 0);
 
+  if (SDValue V = combineVSelectWithAllOnesOrZeros(N0, N1, N2, TLI, DAG, DL))
+    return V;
+
   return SDValue();
 }
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2541182de1208..ed462d9692358 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -47264,13 +47264,14 @@ static SDValue combineToExtendBoolVectorInReg(
                      DAG.getConstant(EltSizeInBits - 1, DL, VT));
 }
 
-/// If a vector select has an operand that is -1 or 0, try to simplify the
+/// If a vector select has a left operand that is 0, try to simplify the
 /// select to a bitwise logic operation.
-/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
-static SDValue
-combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG, const SDLoc &DL,
-                                 TargetLowering::DAGCombinerInfo &DCI,
-                                 const X86Subtarget &Subtarget) {
+/// TODO: Move to DAGCombiner.combineVSelectWithAllOnesOrZeros, possibly using
+/// TargetLowering::hasAndNot()?
+static SDValue combineVSelectWithLastZeros(SDNode *N, SelectionDAG &DAG,
+                                           const SDLoc &DL,
+                                           TargetLowering::DAGCombinerInfo &DCI,
+                                           const X86Subtarget &Subtarget) {
   SDValue Cond = N->getOperand(0);
   SDValue LHS = N->getOperand(1);
   SDValue RHS = N->getOperand(2);
@@ -47283,20 +47284,6 @@ combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG, const SDLoc &DL,
 
   assert(CondVT.isVector() && "Vector select expects a vector selector!");
 
-  // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
-  // TODO: Can we assert that both operands are not zeros (because that should
-  //       get simplified at node creation time)?
-  bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
-  bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
-
-  // If both inputs are 0/undef, create a complete zero vector.
-  // FIXME: As noted above this should be handled by DAGCombiner/getNode.
-  if (TValIsAllZeros && FValIsAllZeros) {
-    if (VT.isFloatingPoint())
-      return DAG.getConstantFP(0.0, DL, VT);
-    return DAG.getConstant(0, DL, VT);
-  }
-
   // To use the condition operand as a bitwise mask, it must have elements that
   // are the same size as the select elements. Ie, the condition operand must
   // have already been promoted from the IR select condition type <N x i1>.
@@ -47305,56 +47292,15 @@ combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG, const SDLoc &DL,
   if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
     return SDValue();
 
-  // Try to invert the condition if true value is not all 1s and false value is
-  // not all 0s. Only do this if the condition has one use.
-  bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
-  if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
-      // Check if the selector will be produced by CMPP*/PCMP*.
-      Cond.getOpcode() == ISD::SETCC &&
-      // Check if SETCC has already been promoted.
-      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
-          CondVT) {
-    bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
-
-    if (TValIsAllZeros || FValIsAllOnes) {
-      SDValue CC = Cond.getOperand(2);
-      ISD::CondCode NewCC = ISD::getSetCCInverse(
-          cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
-      Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
-                          NewCC);
-      std::swap(LHS, RHS);
-      TValIsAllOnes = FValIsAllOnes;
-      FValIsAllZeros = TValIsAllZeros;
-    }
-  }
-
   // Cond value must be 'sign splat' to be converted to a logical op.
   if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
     return SDValue();
 
-  // vselect Cond, 111..., 000... -> Cond
-  if (TValIsAllOnes && FValIsAllZeros)
-    return DAG.getBitcast(VT, Cond);
-
   if (!TLI.isTypeLegal(CondVT))
     return SDValue();
 
-  // vselect Cond, 111..., X -> or Cond, X
-  if (TValIsAllOnes) {
-    SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
-    SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
-    return DAG.getBitcast(VT, Or);
-  }
-
-  // vselect Cond, X, 000... -> and Cond, X
-  if (FValIsAllZeros) {
-    SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
-    SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
-    return DAG.getBitcast(VT, And);
-  }
-
   // vselect Cond, 000..., X -> andn Cond, X
-  if (TValIsAllZeros) {
+  if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
     SDValue AndN;
     // The canonical form differs for i1 vectors - x86andnp is not used
@@ -48117,7 +48063,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
   if (!TLI.isTypeLegal(VT) || isSoftF16(VT, Subtarget))
     return SDValue();
 
-  if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DL, DCI, Subtarget))
+  if (SDValue V = combineVSelectWithLastZeros(N, DAG, DL, DCI, Subtarget))
     return V;
 
   if (SDValue V = combineVSelectToBLENDV(N, DAG, DL, DCI, Subtarget))
diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
index 36094fe56d577..84856aab85079 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-tautological.ll
@@ -77,11 +77,9 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: t1_all_odd_ne:
 ; CHECK-SSE2:       # %bb.0:
 ; CHECK-SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT:    retq
 ;
 ; CHECK-SSE41-LABEL: t1_all_odd_ne:
@@ -92,7 +90,7 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
 ; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
 ; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
 ; CHECK-SSE41-NEXT:    pxor %xmm1, %xmm0
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-SSE41-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: t1_all_odd_ne:
@@ -102,7 +100,7 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
 ; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; CHECK-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: t1_all_odd_ne:
@@ -113,17 +111,16 @@ define <4 x i1> @t1_all_odd_ne(<4 x i32> %X) nounwind {
 ; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; CHECK-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; CHECK-AVX2-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    retq
 ;
 ; CHECK-AVX512VL-LABEL: t1_all_odd_ne:
 ; CHECK-AVX512VL:       # %bb.0:
 ; CHECK-AVX512VL-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpternlogq {{.*#+}} xmm0 = ~xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpternlogq {{.*#+}} xmm0 = m64bcst | (xmm0 ^ xmm1)
 ; CHECK-AVX512VL-NEXT:    retq
   %urem = urem <4 x i32> %X, <i32 3, i32 1, i32 1, i32 9>
   %cmp = icmp ne <4 x i32> %urem, <i32 0, i32 42, i32 0, i32 42>

From 350b02085c81e12b9254c7c6dff6f2dcaea7e5c7 Mon Sep 17 00:00:00 2001
From: woruyu <1214539920 at qq.com>
Date: Mon, 23 Jun 2025 17:01:43 +0800
Subject: [PATCH 2/2] test: update other backend testcase

---
 llvm/test/CodeGen/AArch64/arm64-zip.ll        |   2 +-
 llvm/test/CodeGen/AArch64/cmp-select-sign.ll  |  16 +-
 llvm/test/CodeGen/AArch64/concatbinop.ll      |   2 +-
 llvm/test/CodeGen/AArch64/sat-add.ll          |   8 +-
 llvm/test/CodeGen/AArch64/select_cc.ll        |   2 +-
 .../CodeGen/AArch64/selectcc-to-shiftand.ll   |   3 -
 llvm/test/CodeGen/AArch64/tbl-loops.ll        |  12 +-
 .../test/CodeGen/AArch64/vselect-constants.ll |   4 +-
 llvm/test/CodeGen/AArch64/vselect-ext.ll      | 140 ++---
 llvm/test/CodeGen/ARM/fpclamptosat_vec.ll     |  14 +-
 .../CodeGen/ARM/minnum-maxnum-intrinsics.ll   |   2 +-
 llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll |   3 +-
 llvm/test/CodeGen/LoongArch/lsx/vselect.ll    |   4 +-
 llvm/test/CodeGen/Mips/msa/compare_float.ll   | 491 ++++++++++--------
 llvm/test/CodeGen/PowerPC/recipest.ll         |   9 +-
 llvm/test/CodeGen/PowerPC/sat-add.ll          |  21 +-
 .../CodeGen/SystemZ/vec-max-min-zerosplat.ll  |  65 ++-
 17 files changed, 417 insertions(+), 381 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-zip.ll b/llvm/test/CodeGen/AArch64/arm64-zip.ll
index b24e54a68fb42..20d0c7f1b7085 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zip.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zip.ll
@@ -413,7 +413,7 @@ define <4 x float> @shuffle_zip1(<4 x float> %arg) {
 ; CHECK-NEXT:    fmov.4s v1, #1.00000000
 ; CHECK-NEXT:    zip1.4h v0, v0, v0
 ; CHECK-NEXT:    sshll.4s v0, v0, #0
-; CHECK-NEXT:    and.16b v0, v1, v0
+; CHECK-NEXT:    and.16b v0, v0, v1
 ; CHECK-NEXT:    ret
 bb:
   %inst = fcmp olt <4 x float> zeroinitializer, %arg
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index b4f179e992a0d..6bbbcf88167d8 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -114,9 +114,10 @@ define i64 @not_sign_i64_4(i64 %a) {
 define <7 x i8> @sign_7xi8(<7 x i8> %a) {
 ; CHECK-LABEL: sign_7xi8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v1.8b, #1
-; CHECK-NEXT:    cmlt v0.8b, v0.8b, #0
-; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    movi v1.2d, #0xffffffffffffffff
+; CHECK-NEXT:    movi v2.8b, #1
+; CHECK-NEXT:    cmge v0.8b, v1.8b, v0.8b
+; CHECK-NEXT:    orr v0.8b, v0.8b, v2.8b
 ; CHECK-NEXT:    ret
   %c = icmp sgt <7 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %res = select <7 x i1> %c, <7 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <7 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
@@ -150,7 +151,8 @@ define <16 x i8> @sign_16xi8(<16 x i8> %a) {
 define <3 x i32> @sign_3xi32(<3 x i32> %a) {
 ; CHECK-LABEL: sign_3xi32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-NEXT:    movi v1.2d, #0xffffffffffffffff
+; CHECK-NEXT:    cmge v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    orr v0.4s, #1
 ; CHECK-NEXT:    ret
   %c = icmp sgt <3 x i32> %a, <i32 -1, i32 -1, i32 -1>
@@ -197,11 +199,9 @@ define <4 x i32> @not_sign_4xi32(<4 x i32> %a) {
 ; CHECK-LABEL: not_sign_4xi32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI16_0
-; CHECK-NEXT:    movi v2.4s, #1
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI16_0]
-; CHECK-NEXT:    cmgt v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    and v1.16b, v0.16b, v2.16b
-; CHECK-NEXT:    orn v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    cmge v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #1
 ; CHECK-NEXT:    ret
   %c = icmp sgt <4 x i32> %a, <i32 1, i32 -1, i32 -1, i32 -1>
   %res = select <4 x i1> %c, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
diff --git a/llvm/test/CodeGen/AArch64/concatbinop.ll b/llvm/test/CodeGen/AArch64/concatbinop.ll
index 828182d18b38c..062a5a8c35b2c 100644
--- a/llvm/test/CodeGen/AArch64/concatbinop.ll
+++ b/llvm/test/CodeGen/AArch64/concatbinop.ll
@@ -179,7 +179,7 @@ define <16 x i8> @signOf_neon(ptr nocapture noundef readonly %a, ptr nocapture n
 ; CHECK-NEXT:    uzp1 v3.16b, v5.16b, v6.16b
 ; CHECK-NEXT:    uzp1 v1.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    and v0.16b, v3.16b, v0.16b
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
 entry:
   %0 = load <8 x i16>, ptr %a, align 2
diff --git a/llvm/test/CodeGen/AArch64/sat-add.ll b/llvm/test/CodeGen/AArch64/sat-add.ll
index 2deb19be24821..ecd48d6b7c65b 100644
--- a/llvm/test/CodeGen/AArch64/sat-add.ll
+++ b/llvm/test/CodeGen/AArch64/sat-add.ll
@@ -530,7 +530,7 @@ define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_notval(<16 x i8> %x, <16
 ; CHECK-NEXT:    mvn v2.16b, v1.16b
 ; CHECK-NEXT:    add v1.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    cmhi v0.16b, v0.16b, v2.16b
-; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %a = add <16 x i8> %x, %y
@@ -570,7 +570,7 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_notval(<8 x i16> %x, <8
 ; CHECK-NEXT:    mvn v2.16b, v1.16b
 ; CHECK-NEXT:    add v1.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    cmhi v0.8h, v0.8h, v2.8h
-; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %a = add <8 x i16> %x, %y
@@ -610,7 +610,7 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_notval(<4 x i32> %x, <4
 ; CHECK-NEXT:    mvn v2.16b, v1.16b
 ; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    cmhi v0.4s, v0.4s, v2.4s
-; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
   %a = add <4 x i32> %x, %y
@@ -651,7 +651,7 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_notval(<2 x i64> %x, <2
 ; CHECK-NEXT:    mvn v2.16b, v1.16b
 ; CHECK-NEXT:    add v1.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    cmhi v0.2d, v0.2d, v2.2d
-; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %noty = xor <2 x i64> %y, <i64 -1, i64 -1>
   %a = add <2 x i64> %x, %y
diff --git a/llvm/test/CodeGen/AArch64/select_cc.ll b/llvm/test/CodeGen/AArch64/select_cc.ll
index 73e4d4c7f0aeb..483f6c26af8c1 100644
--- a/llvm/test/CodeGen/AArch64/select_cc.ll
+++ b/llvm/test/CodeGen/AArch64/select_cc.ll
@@ -88,7 +88,7 @@ define <2 x double> @select_olt_load_cmp(<2 x double> %a, ptr %src) {
 ; CHECK-SD-NEXT:    ldr d1, [x0]
 ; CHECK-SD-NEXT:    fcmgt v1.2s, v1.2s, #0.0
 ; CHECK-SD-NEXT:    sshll v1.2d, v1.2s, #0
-; CHECK-SD-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SD-NEXT:    and v0.16b, v1.16b, v0.16b
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: select_olt_load_cmp:
diff --git a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
index 32fc9c1377704..0d4a636446164 100644
--- a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
@@ -249,9 +249,6 @@ define <16 x i8> @sel_shift_bool_v16i8(<16 x i1> %t) {
 ; CHECK-SD-LABEL: sel_shift_bool_v16i8:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    shl v0.16b, v0.16b, #7
-; CHECK-SD-NEXT:    movi v1.16b, #128
-; CHECK-SD-NEXT:    cmlt v0.16b, v0.16b, #0
-; CHECK-SD-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: sel_shift_bool_v16i8:
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index b5d64112db727..aa0a163b96ac8 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -31,12 +31,12 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-NEXT:    add x13, x13, #32
 ; CHECK-NEXT:    fcmgt v3.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    fcmgt v4.4s, v2.4s, v0.4s
-; CHECK-NEXT:    fcmlt v5.4s, v1.4s, #0.0
-; CHECK-NEXT:    fcmlt v6.4s, v2.4s, #0.0
-; CHECK-NEXT:    bit v1.16b, v0.16b, v3.16b
-; CHECK-NEXT:    bit v2.16b, v0.16b, v4.16b
-; CHECK-NEXT:    bic v1.16b, v1.16b, v5.16b
-; CHECK-NEXT:    bic v2.16b, v2.16b, v6.16b
+; CHECK-NEXT:    bsl v3.16b, v0.16b, v1.16b
+; CHECK-NEXT:    bsl v4.16b, v0.16b, v2.16b
+; CHECK-NEXT:    fcmlt v1.4s, v1.4s, #0.0
+; CHECK-NEXT:    fcmlt v2.4s, v2.4s, #0.0
+; CHECK-NEXT:    bic v1.16b, v3.16b, v1.16b
+; CHECK-NEXT:    bic v2.16b, v4.16b, v2.16b
 ; CHECK-NEXT:    fcvtzs v1.4s, v1.4s
 ; CHECK-NEXT:    fcvtzs v2.4s, v2.4s
 ; CHECK-NEXT:    xtn v1.4h, v1.4s
diff --git a/llvm/test/CodeGen/AArch64/vselect-constants.ll b/llvm/test/CodeGen/AArch64/vselect-constants.ll
index a7cf5ece5d270..fe125c9626ea3 100644
--- a/llvm/test/CodeGen/AArch64/vselect-constants.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-constants.ll
@@ -146,10 +146,8 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_1_or_0_vec:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-NEXT:    movi v1.4s, #1
-; CHECK-NEXT:    shl v0.4s, v0.4s, #31
-; CHECK-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
diff --git a/llvm/test/CodeGen/AArch64/vselect-ext.ll b/llvm/test/CodeGen/AArch64/vselect-ext.ll
index 76b7f3d9dfc0e..4f2b9c5a62669 100644
--- a/llvm/test/CodeGen/AArch64/vselect-ext.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-ext.ll
@@ -12,10 +12,10 @@ define <16 x i32> @no_existing_zext(<16 x i8> %a, <16 x i32> %op) {
 ; CHECK-NEXT:    sshll.4s v6, v5, #0
 ; CHECK-NEXT:    sshll.4s v7, v0, #0
 ; CHECK-NEXT:    sshll2.4s v5, v5, #0
-; CHECK-NEXT:    and.16b v4, v4, v16
-; CHECK-NEXT:    and.16b v0, v1, v6
-; CHECK-NEXT:    and.16b v1, v2, v5
-; CHECK-NEXT:    and.16b v2, v3, v7
+; CHECK-NEXT:    and.16b v4, v16, v4
+; CHECK-NEXT:    and.16b v0, v6, v1
+; CHECK-NEXT:    and.16b v1, v5, v2
+; CHECK-NEXT:    and.16b v2, v7, v3
 ; CHECK-NEXT:    mov.16b v3, v4
 ; CHECK-NEXT:    ret
 entry:
@@ -40,10 +40,10 @@ define <16 x i32> @second_compare_operand_not_splat(<16 x i8> %a, <16 x i8> %b)
 ; CHECK-NEXT:    sshll.4s v7, v1, #0
 ; CHECK-NEXT:    sshll2.4s v16, v3, #0
 ; CHECK-NEXT:    sshll2.4s v1, v1, #0
-; CHECK-NEXT:    and.16b v0, v4, v0
-; CHECK-NEXT:    and.16b v3, v6, v1
-; CHECK-NEXT:    and.16b v1, v2, v16
-; CHECK-NEXT:    and.16b v2, v5, v7
+; CHECK-NEXT:    and.16b v0, v0, v4
+; CHECK-NEXT:    and.16b v3, v1, v6
+; CHECK-NEXT:    and.16b v1, v16, v2
+; CHECK-NEXT:    and.16b v2, v7, v5
 ; CHECK-NEXT:    ret
 entry:
   %ext = zext <16 x i8> %a to <16 x i32>
@@ -69,10 +69,10 @@ define <16 x i32> @same_zext_used_in_cmp_signed_pred_and_select(<16 x i8> %a) {
 ; CHECK-NEXT:    sshll.4s v7, v1, #0
 ; CHECK-NEXT:    sshll2.4s v16, v3, #0
 ; CHECK-NEXT:    sshll2.4s v1, v1, #0
-; CHECK-NEXT:    and.16b v0, v4, v0
-; CHECK-NEXT:    and.16b v3, v6, v1
-; CHECK-NEXT:    and.16b v1, v2, v16
-; CHECK-NEXT:    and.16b v2, v5, v7
+; CHECK-NEXT:    and.16b v0, v0, v4
+; CHECK-NEXT:    and.16b v3, v1, v6
+; CHECK-NEXT:    and.16b v1, v16, v2
+; CHECK-NEXT:    and.16b v2, v7, v5
 ; CHECK-NEXT:    ret
 entry:
   %ext = zext <16 x i8> %a to <16 x i32>
@@ -97,10 +97,10 @@ define <8 x i64> @same_zext_used_in_cmp_unsigned_pred_and_select_v8i64(<8 x i8>
 ; CHECK-NEXT:    cmhi.2d v7, v1, v2
 ; CHECK-NEXT:    cmhi.2d v6, v5, v2
 ; CHECK-NEXT:    cmhi.2d v2, v4, v2
-; CHECK-NEXT:    and.16b v0, v3, v0
-; CHECK-NEXT:    and.16b v1, v1, v7
-; CHECK-NEXT:    and.16b v3, v4, v2
-; CHECK-NEXT:    and.16b v2, v5, v6
+; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v7, v1
+; CHECK-NEXT:    and.16b v3, v2, v4
+; CHECK-NEXT:    and.16b v2, v6, v5
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i8> %a to <8 x i64>
   %cmp = icmp ugt <8 x i8> %a, <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
@@ -123,10 +123,10 @@ define <16 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v16i32(<16 x i
 ; CHECK-NEXT:    cmhi.4s v7, v2, v1
 ; CHECK-NEXT:    cmhi.4s v6, v5, v1
 ; CHECK-NEXT:    cmhi.4s v1, v4, v1
-; CHECK-NEXT:    and.16b v0, v3, v0
-; CHECK-NEXT:    and.16b v3, v4, v1
-; CHECK-NEXT:    and.16b v1, v2, v7
-; CHECK-NEXT:    and.16b v2, v5, v6
+; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v3, v1, v4
+; CHECK-NEXT:    and.16b v1, v7, v2
+; CHECK-NEXT:    and.16b v2, v6, v5
 ; CHECK-NEXT:    ret
   %ext = zext <16 x i8> %a to <16 x i32>
   %cmp = icmp ugt <16 x i8> %a, <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
@@ -143,8 +143,8 @@ define <8 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v8i32(<8 x i8>
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmhi.4s v3, v0, v1
 ; CHECK-NEXT:    cmhi.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i8> %a to <8 x i32>
   %cmp = icmp ugt <8 x i8> %a, <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
@@ -160,8 +160,8 @@ define <8 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v8i32_2(<8 x i1
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmhi.4s v3, v0, v1
 ; CHECK-NEXT:    cmhi.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i16> %a to <8 x i32>
   %cmp = icmp ugt <8 x i16> %a, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
@@ -179,8 +179,8 @@ define <8 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v8i32_from_v8i1
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmhi.4s v3, v0, v1
 ; CHECK-NEXT:    cmhi.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i15> %a to <8 x i32>
   %cmp = icmp ugt <8 x i15> %a, <i15 10, i15 10, i15 10, i15 10, i15 10, i15 10, i15 10, i15 10>
@@ -197,8 +197,8 @@ define <7 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v7i32(<7 x i16>
 ; CHECK-NEXT:    ushll2.4s v0, v0, #0
 ; CHECK-NEXT:    sshll.4s v3, v1, #0
 ; CHECK-NEXT:    sshll2.4s v1, v1, #0
-; CHECK-NEXT:    and.16b v2, v2, v3
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    and.16b v2, v3, v2
+; CHECK-NEXT:    and.16b v0, v1, v0
 ; CHECK-NEXT:    mov.s w1, v2[1]
 ; CHECK-NEXT:    mov.s w2, v2[2]
 ; CHECK-NEXT:    mov.s w3, v2[3]
@@ -244,7 +244,7 @@ define <4 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v4i32(<4 x i16>
 ; CHECK-NEXT:    movi.4s v1, #10
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmhi.4s v1, v0, v1
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    and.16b v0, v1, v0
 ; CHECK-NEXT:    ret
   %ext = zext <4 x i16> %a to <4 x i32>
   %cmp = icmp ugt <4 x i16> %a, <i16 10, i16 10, i16 10, i16 10>
@@ -259,7 +259,7 @@ define <2 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v2i32(<2 x i16>
 ; CHECK-NEXT:    movi.2s v2, #10
 ; CHECK-NEXT:    and.8b v0, v0, v1
 ; CHECK-NEXT:    cmhi.2s v1, v0, v2
-; CHECK-NEXT:    and.8b v0, v0, v1
+; CHECK-NEXT:    and.8b v0, v1, v0
 ; CHECK-NEXT:    ret
   %ext = zext <2 x i16> %a to <2 x i32>
   %cmp = icmp ugt <2 x i16> %a, <i16 10, i16 10>
@@ -275,8 +275,8 @@ define <8 x i32> @same_zext_used_in_cmp_eq_and_select_v8i32(<8 x i16> %a) {
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmeq.4s v3, v0, v1
 ; CHECK-NEXT:    cmeq.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i16> %a to <8 x i32>
   %cmp = icmp eq <8 x i16> %a, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
@@ -293,8 +293,8 @@ define <8 x i32> @same_zext_used_in_cmp_eq_and_select_v8i32_from_v8i13(<8 x i13>
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    cmeq.4s v3, v0, v1
 ; CHECK-NEXT:    cmeq.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = zext <8 x i13> %a to <8 x i32>
   %cmp = icmp eq <8 x i13> %a, <i13 10, i13 10, i13 10, i13 10, i13 10, i13 10, i13 10, i13 10>
@@ -358,16 +358,16 @@ define <16 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_other_use(<16
 ; CHECK-NEXT:    and.16b v6, v6, v26
 ; CHECK-NEXT:    sshll2.2d v26, v16, #0
 ; CHECK-NEXT:    and.16b v27, v4, v27
-; CHECK-NEXT:    and.16b v4, v0, v18
-; CHECK-NEXT:    and.16b v0, v24, v16
+; CHECK-NEXT:    and.16b v4, v18, v0
+; CHECK-NEXT:    and.16b v0, v16, v24
 ; CHECK-NEXT:    stp q7, q21, [x0, #96]
 ; CHECK-NEXT:    sshll.2d v21, v16, #0
 ; CHECK-NEXT:    and.16b v5, v5, v22
 ; CHECK-NEXT:    and.16b v7, v3, v23
-; CHECK-NEXT:    and.16b v3, v19, v20
+; CHECK-NEXT:    and.16b v3, v20, v19
 ; CHECK-NEXT:    stp q5, q6, [x0, #64]
 ; CHECK-NEXT:    and.16b v6, v2, v26
-; CHECK-NEXT:    and.16b v2, v25, v17
+; CHECK-NEXT:    and.16b v2, v17, v25
 ; CHECK-NEXT:    and.16b v5, v1, v21
 ; CHECK-NEXT:    mov.16b v1, v3
 ; CHECK-NEXT:    mov.16b v3, v4
@@ -397,10 +397,10 @@ define <16 x i32> @same_sext_used_in_cmp_signed_pred_and_select_v16i32(<16 x i8>
 ; CHECK-NEXT:    cmgt.4s v7, v2, v1
 ; CHECK-NEXT:    cmgt.4s v6, v5, v1
 ; CHECK-NEXT:    cmgt.4s v1, v4, v1
-; CHECK-NEXT:    and.16b v0, v3, v0
-; CHECK-NEXT:    and.16b v3, v4, v1
-; CHECK-NEXT:    and.16b v1, v2, v7
-; CHECK-NEXT:    and.16b v2, v5, v6
+; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v3, v1, v4
+; CHECK-NEXT:    and.16b v1, v7, v2
+; CHECK-NEXT:    and.16b v2, v6, v5
 ; CHECK-NEXT:    ret
 entry:
   %ext = sext <16 x i8> %a to <16 x i32>
@@ -417,8 +417,8 @@ define <8 x i32> @same_sext_used_in_cmp_eq_and_select_v8i32(<8 x i16> %a) {
 ; CHECK-NEXT:    sshll.4s v0, v0, #0
 ; CHECK-NEXT:    cmeq.4s v3, v0, v1
 ; CHECK-NEXT:    cmeq.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
   %ext = sext <8 x i16> %a to <8 x i32>
   %cmp = icmp eq <8 x i16> %a, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
@@ -438,8 +438,8 @@ define <8 x i32> @same_sext_used_in_cmp_eq_and_select_v8i32_from_v8i13(<8 x i13>
 ; CHECK-NEXT:    sshr.4s v2, v2, #19
 ; CHECK-NEXT:    cmeq.4s v3, v2, v1
 ; CHECK-NEXT:    cmeq.4s v1, v0, v1
-; CHECK-NEXT:    and.16b v1, v0, v1
-; CHECK-NEXT:    and.16b v0, v2, v3
+; CHECK-NEXT:    and.16b v1, v1, v0
+; CHECK-NEXT:    and.16b v0, v3, v2
 ; CHECK-NEXT:    ret
   %ext = sext <8 x i13> %a to <8 x i32>
   %cmp = icmp eq <8 x i13> %a, <i13 10, i13 10, i13 10, i13 10, i13 10, i13 10, i13 10, i13 10>
@@ -480,8 +480,8 @@ define <8 x i32> @same_sext_used_in_cmp_signed_pred_and_select_v8i32(<8 x i16> %
 ; CHECK-NEXT:    sshll.4s v0, v0, #0
 ; CHECK-NEXT:    cmgt.4s v3, v0, v1
 ; CHECK-NEXT:    cmgt.4s v1, v2, v1
-; CHECK-NEXT:    and.16b v1, v2, v1
-; CHECK-NEXT:    and.16b v0, v0, v3
+; CHECK-NEXT:    and.16b v1, v1, v2
+; CHECK-NEXT:    and.16b v0, v3, v0
 ; CHECK-NEXT:    ret
 entry:
   %ext = sext <8 x i16> %a to <8 x i32>
@@ -502,8 +502,8 @@ define <8 x i32> @same_sext_used_in_cmp_unsigned_pred_and_select_v8i32_from_v8i1
 ; CHECK-NEXT:    sshr.4s v2, v2, #17
 ; CHECK-NEXT:    cmge.4s v3, v2, v1
 ; CHECK-NEXT:    cmge.4s v1, v0, v1
-; CHECK-NEXT:    and.16b v1, v0, v1
-; CHECK-NEXT:    and.16b v0, v2, v3
+; CHECK-NEXT:    and.16b v1, v1, v0
+; CHECK-NEXT:    and.16b v0, v3, v2
 ; CHECK-NEXT:    ret
   %ext = sext <8 x i15> %a to <8 x i32>
   %cmp = icmp sge <8 x i15> %a, <i15 10, i15 10, i15 10, i15 10, i15 10, i15 10, i15 10, i15 10>
@@ -524,11 +524,11 @@ define <16 x i32> @same_sext_used_in_cmp_unsigned_pred_and_select(<16 x i8> %a)
 ; CHECK-NEXT:    ext.16b v5, v0, v0, #8
 ; CHECK-NEXT:    ext.16b v6, v3, v3, #8
 ; CHECK-NEXT:    ext.16b v7, v1, v1, #8
-; CHECK-NEXT:    and.8b v2, v2, v3
-; CHECK-NEXT:    and.8b v1, v0, v1
+; CHECK-NEXT:    and.8b v2, v3, v2
+; CHECK-NEXT:    and.8b v1, v1, v0
 ; CHECK-NEXT:    sshll.4s v0, v2, #0
-; CHECK-NEXT:    and.8b v3, v5, v7
-; CHECK-NEXT:    and.8b v4, v4, v6
+; CHECK-NEXT:    and.8b v3, v7, v5
+; CHECK-NEXT:    and.8b v4, v6, v4
 ; CHECK-NEXT:    sshll.4s v2, v1, #0
 ; CHECK-NEXT:    sshll.4s v3, v3, #0
 ; CHECK-NEXT:    sshll.4s v1, v4, #0
@@ -556,10 +556,10 @@ define <16 x i32> @same_zext_used_in_cmp_signed_pred_and_select_can_convert_to_u
 ; CHECK-NEXT:    sshll.4s v7, v1, #0
 ; CHECK-NEXT:    sshll2.4s v16, v3, #0
 ; CHECK-NEXT:    sshll2.4s v1, v1, #0
-; CHECK-NEXT:    and.16b v0, v4, v0
-; CHECK-NEXT:    and.16b v3, v6, v1
-; CHECK-NEXT:    and.16b v1, v2, v16
-; CHECK-NEXT:    and.16b v2, v5, v7
+; CHECK-NEXT:    and.16b v0, v0, v4
+; CHECK-NEXT:    and.16b v3, v1, v6
+; CHECK-NEXT:    and.16b v1, v16, v2
+; CHECK-NEXT:    and.16b v2, v7, v5
 ; CHECK-NEXT:    ret
 entry:
   %ext = zext <16 x i8> %a to <16 x i32>
@@ -604,10 +604,10 @@ define void @extension_in_loop_v16i8_to_v16i32(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    sshll.4s v6, v6, #0
 ; CHECK-NEXT:    sshll2.4s v19, v5, #0
 ; CHECK-NEXT:    sshll.4s v5, v5, #0
-; CHECK-NEXT:    and.16b v7, v7, v17
-; CHECK-NEXT:    and.16b v6, v16, v6
-; CHECK-NEXT:    and.16b v16, v18, v19
-; CHECK-NEXT:    and.16b v4, v4, v5
+; CHECK-NEXT:    and.16b v7, v17, v7
+; CHECK-NEXT:    and.16b v6, v6, v16
+; CHECK-NEXT:    and.16b v16, v19, v18
+; CHECK-NEXT:    and.16b v4, v5, v4
 ; CHECK-NEXT:    stp q6, q7, [x1, #32]
 ; CHECK-NEXT:    stp q4, q16, [x1], #64
 ; CHECK-NEXT:    b.ne LBB24_1
@@ -674,10 +674,10 @@ define void @extension_in_loop_as_shuffle_v16i8_to_v16i32(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    sshll.4s v6, v6, #0
 ; CHECK-NEXT:    sshll2.4s v19, v5, #0
 ; CHECK-NEXT:    sshll.4s v5, v5, #0
-; CHECK-NEXT:    and.16b v7, v7, v17
-; CHECK-NEXT:    and.16b v6, v16, v6
-; CHECK-NEXT:    and.16b v16, v18, v19
-; CHECK-NEXT:    and.16b v4, v4, v5
+; CHECK-NEXT:    and.16b v7, v17, v7
+; CHECK-NEXT:    and.16b v6, v6, v16
+; CHECK-NEXT:    and.16b v16, v19, v18
+; CHECK-NEXT:    and.16b v4, v5, v4
 ; CHECK-NEXT:    stp q6, q7, [x1, #32]
 ; CHECK-NEXT:    stp q4, q16, [x1], #64
 ; CHECK-NEXT:    b.ne LBB25_1
@@ -745,10 +745,10 @@ define void @shuffle_in_loop_is_no_extend_v16i8_to_v16i32(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    sshll.4s v6, v6, #0
 ; CHECK-NEXT:    sshll2.4s v19, v5, #0
 ; CHECK-NEXT:    sshll.4s v5, v5, #0
-; CHECK-NEXT:    and.16b v7, v7, v17
-; CHECK-NEXT:    and.16b v6, v16, v6
-; CHECK-NEXT:    and.16b v16, v18, v19
-; CHECK-NEXT:    and.16b v4, v4, v5
+; CHECK-NEXT:    and.16b v7, v17, v7
+; CHECK-NEXT:    and.16b v6, v6, v16
+; CHECK-NEXT:    and.16b v16, v19, v18
+; CHECK-NEXT:    and.16b v4, v5, v4
 ; CHECK-NEXT:    stp q6, q7, [x1, #32]
 ; CHECK-NEXT:    stp q4, q16, [x1], #64
 ; CHECK-NEXT:    b.ne LBB26_1
diff --git a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
index 4d091c2302658..96f009a4da02d 100644
--- a/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/ARM/fpclamptosat_vec.ll
@@ -174,7 +174,7 @@ define <2 x i32> @ustest_f64i32(<2 x double> %x) {
 ; CHECK-NEXT:    cmp r2, #0
 ; CHECK-NEXT:    mvnne r2, #0
 ; CHECK-NEXT:    vmov.32 d18[0], r2
-; CHECK-NEXT:    vand q8, q8, q9
+; CHECK-NEXT:    vand q8, q9, q8
 ; CHECK-NEXT:    vmovn.i64 d0, q8
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    pop {r4, r5, r11, pc}
@@ -483,8 +483,8 @@ define <4 x i32> @ustest_f32i32(<4 x float> %x) {
 ; CHECK-NEXT:    vmov.32 d20[0], r7
 ; CHECK-NEXT:    mvnne r4, #0
 ; CHECK-NEXT:    vmov.32 d18[0], r4
-; CHECK-NEXT:    vand q10, q4, q10
-; CHECK-NEXT:    vand q8, q8, q9
+; CHECK-NEXT:    vand q10, q10, q4
+; CHECK-NEXT:    vand q8, q9, q8
 ; CHECK-NEXT:    vmovn.i64 d1, q10
 ; CHECK-NEXT:    vmovn.i64 d0, q8
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
@@ -995,8 +995,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-NEON-NEXT:    vmov.32 d20[0], r6
 ; CHECK-NEON-NEXT:    mvnne r7, #0
 ; CHECK-NEON-NEXT:    vmov.32 d18[0], r7
-; CHECK-NEON-NEXT:    vand q10, q4, q10
-; CHECK-NEON-NEXT:    vand q8, q8, q9
+; CHECK-NEON-NEXT:    vand q10, q10, q4
+; CHECK-NEON-NEXT:    vand q8, q9, q8
 ; CHECK-NEON-NEXT:    vmovn.i64 d1, q10
 ; CHECK-NEON-NEXT:    vmovn.i64 d0, q8
 ; CHECK-NEON-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
@@ -1097,8 +1097,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-FP16-NEXT:    vmov.32 d20[0], r7
 ; CHECK-FP16-NEXT:    mvnne r6, #0
 ; CHECK-FP16-NEXT:    vmov.32 d18[0], r6
-; CHECK-FP16-NEXT:    vand q10, q4, q10
-; CHECK-FP16-NEXT:    vand q8, q8, q9
+; CHECK-FP16-NEXT:    vand q10, q10, q4
+; CHECK-FP16-NEXT:    vand q8, q9, q8
 ; CHECK-FP16-NEXT:    vmovn.i64 d1, q10
 ; CHECK-FP16-NEXT:    vmovn.i64 d0, q8
 ; CHECK-FP16-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
diff --git a/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll b/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
index ec7516524ee67..6706d25ae01d2 100644
--- a/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
+++ b/llvm/test/CodeGen/ARM/minnum-maxnum-intrinsics.ll
@@ -1362,7 +1362,7 @@ define void @pr65820(ptr %y, <4 x float> %splat) {
 ; ARMV7-NEXT:    vmov d16, r2, r3
 ; ARMV7-NEXT:    vdup.32 q8, d16[0]
 ; ARMV7-NEXT:    vcgt.f32 q9, q8, #0
-; ARMV7-NEXT:    vand q8, q8, q9
+; ARMV7-NEXT:    vand q8, q9, q8
 ; ARMV7-NEXT:    vst1.32 {d16, d17}, [r0]
 ; ARMV7-NEXT:    bx lr
 ;
diff --git a/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll b/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll
index 5a861be95977d..c61b7841b95ac 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/xvmskcond.ll
@@ -383,8 +383,7 @@ define i8 @xvmsk_eq_vsel_slt_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2)
 ; CHECK-LABEL: xvmsk_eq_vsel_slt_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xvseq.w $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvrepli.b $xr1, -1
-; CHECK-NEXT:    xvbitsel.v $xr0, $xr2, $xr1, $xr0
+; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr2
 ; CHECK-NEXT:    xvmskltz.w $xr0, $xr0
 ; CHECK-NEXT:    xvpickve2gr.wu $a0, $xr0, 0
 ; CHECK-NEXT:    xvpickve2gr.wu $a1, $xr0, 4
diff --git a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll
index 1c10e6c3087ad..4d2ddeb2889bb 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll
@@ -6,8 +6,8 @@ define void @select_v16i8_imm(ptr %res, ptr %a0) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vld $vr0, $a1, 0
 ; CHECK-NEXT:    vrepli.h $vr1, -256
-; CHECK-NEXT:    vbitseli.b $vr1, $vr0, 255
-; CHECK-NEXT:    vst $vr1, $a0, 0
+; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
+; CHECK-NEXT:    vst $vr0, $a0, 0
 ; CHECK-NEXT:    ret
   %v0 = load <16 x i8>, ptr %a0
   %sel = select <16 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %v0
diff --git a/llvm/test/CodeGen/Mips/msa/compare_float.ll b/llvm/test/CodeGen/Mips/msa/compare_float.ll
index 2656cb839768c..9c05ab4dabc9e 100644
--- a/llvm/test/CodeGen/Mips/msa/compare_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/compare_float.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=mips-elf -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 ; RUN: llc -mtriple=mipsel-elf -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 
@@ -7,7 +8,11 @@ declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
 declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
 
 define void @false_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: false_v4f32:
+; CHECK-LABEL: false_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ldi.b $w0, 0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
   %2 = load <4 x float>, ptr %b
@@ -17,13 +22,14 @@ define void @false_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ret void
 
   ; (setcc $a, $b, SETFALSE) is always folded, so we won't get fcaf:
-  ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], 0
-  ; CHECK-DAG: st.w [[R1]], 0($4)
-  ; CHECK: .size false_v4f32
 }
 
 define void @false_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: false_v2f64:
+; CHECK-LABEL: false_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ldi.b $w0, 0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
   %2 = load <2 x double>, ptr %b
@@ -33,455 +39,482 @@ define void @false_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ret void
 
   ; (setcc $a, $b, SETFALSE) is always folded
-  ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], 0
-  ; CHECK-DAG: st.w [[R1]], 0($4)
-  ; CHECK: .size false_v2f64
 }
 
 define void @oeq_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: oeq_v4f32:
+; CHECK-LABEL: oeq_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fceq.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp oeq <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size oeq_v4f32
 }
 
 define void @oeq_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: oeq_v2f64:
+; CHECK-LABEL: oeq_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fceq.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp oeq <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size oeq_v2f64
 }
 
 define void @oge_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: oge_v4f32:
+; CHECK-LABEL: oge_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($5)
+; CHECK-NEXT:    ld.w $w1, 0($6)
+; CHECK-NEXT:    fcle.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp oge <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size oge_v4f32
 }
 
 define void @oge_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: oge_v2f64:
+; CHECK-LABEL: oge_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($5)
+; CHECK-NEXT:    ld.d $w1, 0($6)
+; CHECK-NEXT:    fcle.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp oge <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size oge_v2f64
 }
 
 define void @ogt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ogt_v4f32:
+; CHECK-LABEL: ogt_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($5)
+; CHECK-NEXT:    ld.w $w1, 0($6)
+; CHECK-NEXT:    fclt.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ogt <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ogt_v4f32
 }
 
 define void @ogt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ogt_v2f64:
+; CHECK-LABEL: ogt_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($5)
+; CHECK-NEXT:    ld.d $w1, 0($6)
+; CHECK-NEXT:    fclt.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ogt <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ogt_v2f64
 }
 
 define void @ole_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ole_v4f32:
+; CHECK-LABEL: ole_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fcle.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ole <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ole_v4f32
 }
 
 define void @ole_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ole_v2f64:
+; CHECK-LABEL: ole_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fcle.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ole <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ole_v2f64
 }
 
 define void @olt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: olt_v4f32:
+; CHECK-LABEL: olt_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fclt.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp olt <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size olt_v4f32
 }
 
 define void @olt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: olt_v2f64:
+; CHECK-LABEL: olt_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fclt.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp olt <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size olt_v2f64
 }
 
 define void @one_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: one_v4f32:
+; CHECK-LABEL: one_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fcne.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp one <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcne.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size one_v4f32
 }
 
 define void @one_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: one_v2f64:
+; CHECK-LABEL: one_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fcne.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp one <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcne.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size one_v2f64
 }
 
 define void @ord_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ord_v4f32:
+; CHECK-LABEL: ord_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fcor.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ord <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcor.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ord_v4f32
 }
 
 define void @ord_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ord_v2f64:
+; CHECK-LABEL: ord_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fcor.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ord <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcor.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ord_v2f64
 }
 
 define void @ueq_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ueq_v4f32:
+; CHECK-LABEL: ueq_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fcueq.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ueq <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcueq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ueq_v4f32
 }
 
 define void @ueq_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ueq_v2f64:
+; CHECK-LABEL: ueq_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fcueq.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ueq <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcueq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ueq_v2f64
 }
 
 define void @uge_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: uge_v4f32:
+; CHECK-LABEL: uge_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($5)
+; CHECK-NEXT:    ld.w $w1, 0($6)
+; CHECK-NEXT:    fcule.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp uge <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size uge_v4f32
 }
 
 define void @uge_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: uge_v2f64:
+; CHECK-LABEL: uge_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($5)
+; CHECK-NEXT:    ld.d $w1, 0($6)
+; CHECK-NEXT:    fcule.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp uge <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size uge_v2f64
 }
 
 define void @ugt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ugt_v4f32:
+; CHECK-LABEL: ugt_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($5)
+; CHECK-NEXT:    ld.w $w1, 0($6)
+; CHECK-NEXT:    fcult.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ugt <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ugt_v4f32
 }
 
 define void @ugt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ugt_v2f64:
+; CHECK-LABEL: ugt_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($5)
+; CHECK-NEXT:    ld.d $w1, 0($6)
+; CHECK-NEXT:    fcult.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ugt <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ugt_v2f64
 }
 
 define void @ule_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ule_v4f32:
+; CHECK-LABEL: ule_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fcule.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ule <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ule_v4f32
 }
 
 define void @ule_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ule_v2f64:
+; CHECK-LABEL: ule_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fcule.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ule <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ule_v2f64
 }
 
 define void @ult_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ult_v4f32:
+; CHECK-LABEL: ult_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fcult.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ult <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ult_v4f32
 }
 
 define void @ult_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: ult_v2f64:
+; CHECK-LABEL: ult_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fcult.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ult <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size ult_v2f64
 }
 
 define void @uno_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: uno_v4f32:
+; CHECK-LABEL: uno_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fcun.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp uno <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  ; CHECK-DAG: fcun.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x i32> %4, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size uno_v4f32
 }
 
 define void @uno_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: uno_v2f64:
+; CHECK-LABEL: uno_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fcun.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp uno <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  ; CHECK-DAG: fcun.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x i64> %4, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size uno_v2f64
 }
 
 define void @true_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: true_v4f32:
+; CHECK-LABEL: true_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ldi.b $w0, -1
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
   %2 = load <4 x float>, ptr %b
@@ -491,13 +524,14 @@ define void @true_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ret void
 
   ; (setcc $a, $b, SETTRUE) is always folded, so we won't get fcaf:
-  ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], -1
-  ; CHECK-DAG: st.w [[R1]], 0($4)
-  ; CHECK: .size true_v4f32
 }
 
 define void @true_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: true_v2f64:
+; CHECK-LABEL: true_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ldi.b $w0, -1
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
   %2 = load <2 x double>, ptr %b
@@ -507,155 +541,160 @@ define void @true_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ret void
 
   ; (setcc $a, $b, SETTRUE) is always folded.
-  ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], -1
-  ; CHECK-DAG: st.w [[R1]], 0($4)
-  ; CHECK: .size true_v2f64
 }
 
 define void @bsel_v4f32(ptr %d, ptr %a, ptr %b,
+; CHECK-LABEL: bsel_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($5)
+; CHECK-NEXT:    ld.w $w1, 0($6)
+; CHECK-NEXT:    fclt.w $w1, $w1, $w0
+; CHECK-NEXT:    ld.w $w2, 0($7)
+; CHECK-NEXT:    bsel.v $w1, $w2, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w1, 0($4)
                           ptr %c) nounwind {
-  ; CHECK: bsel_v4f32:
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = load <4 x float>, ptr %c
-  ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
   %4 = fcmp ogt <4 x float> %1, %2
-  ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <4 x i1> %4, <4 x float> %1, <4 x float> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
-  ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
   store <4 x float> %5, ptr %d
-  ; CHECK-DAG: st.w [[R4]], 0($4)
 
   ret void
-  ; CHECK: .size bsel_v4f32
 }
 
 define void @bsel_v2f64(ptr %d, ptr %a, ptr %b,
+; CHECK-LABEL: bsel_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($5)
+; CHECK-NEXT:    ld.d $w1, 0($6)
+; CHECK-NEXT:    fclt.d $w1, $w1, $w0
+; CHECK-NEXT:    ld.d $w2, 0($7)
+; CHECK-NEXT:    bsel.v $w1, $w2, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w1, 0($4)
                           ptr %c) nounwind {
-  ; CHECK: bsel_v2f64:
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = load <2 x double>, ptr %c
-  ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
   %4 = fcmp ogt <2 x double> %1, %2
-  ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <2 x i1> %4, <2 x double> %1, <2 x double> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
-  ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
   store <2 x double> %5, ptr %d
-  ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
-  ; CHECK: .size bsel_v2f64
 }
 
 define void @bseli_v4f32(ptr %d, ptr %a, ptr %b,
+; CHECK-LABEL: bseli_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($5)
+; CHECK-NEXT:    ld.w $w1, 0($6)
+; CHECK-NEXT:    fclt.w $w1, $w1, $w0
+; CHECK-NEXT:    and.v $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
                           ptr %c) nounwind {
-  ; CHECK: bseli_v4f32:
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ogt <4 x float> %1, %2
-  ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <4 x i1> %3, <4 x float> %1, <4 x float> zeroinitializer
   ; Note that IfSet and IfClr are swapped since the condition is inverted
-  ; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]]
   store <4 x float> %4, ptr %d
-  ; CHECK-DAG: st.w [[R4]], 0($4)
 
   ret void
-  ; CHECK: .size bseli_v4f32
 }
 
 define void @bseli_v2f64(ptr %d, ptr %a, ptr %b,
+; CHECK-LABEL: bseli_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($5)
+; CHECK-NEXT:    ld.d $w1, 0($6)
+; CHECK-NEXT:    fclt.d $w1, $w1, $w0
+; CHECK-NEXT:    and.v $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
                           ptr %c) nounwind {
-  ; CHECK: bseli_v2f64:
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ogt <2 x double> %1, %2
-  ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <2 x i1> %3, <2 x double> %1, <2 x double> zeroinitializer
   ; Note that IfSet and IfClr are swapped since the condition is inverted
-  ; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]]
   store <2 x double> %4, ptr %d
-  ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
-  ; CHECK: .size bseli_v2f64
 }
 
 define void @max_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: max_v4f32:
+; CHECK-LABEL: max_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fmax.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %1, <4 x float> %2)
-  ; CHECK-DAG: fmax.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x float> %3, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size max_v4f32
 }
 
 define void @max_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: max_v2f64:
+; CHECK-LABEL: max_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fmax.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %1, <2 x double> %2)
-  ; CHECK-DAG: fmax.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x double> %3, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size max_v2f64
 }
 
 define void @min_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: min_v4f32:
+; CHECK-LABEL: min_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.w $w0, 0($6)
+; CHECK-NEXT:    ld.w $w1, 0($5)
+; CHECK-NEXT:    fmin.w $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.w $w0, 0($4)
 
   %1 = load <4 x float>, ptr %a
-  ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = load <4 x float>, ptr %b
-  ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %1, <4 x float> %2)
-  ; CHECK-DAG: fmin.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <4 x float> %3, ptr %c
-  ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size min_v4f32
 }
 
 define void @min_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
-  ; CHECK: min_v2f64:
+; CHECK-LABEL: min_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ld.d $w0, 0($6)
+; CHECK-NEXT:    ld.d $w1, 0($5)
+; CHECK-NEXT:    fmin.d $w0, $w1, $w0
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    st.d $w0, 0($4)
 
   %1 = load <2 x double>, ptr %a
-  ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = load <2 x double>, ptr %b
-  ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %1, <2 x double> %2)
-  ; CHECK-DAG: fmin.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   store <2 x double> %3, ptr %c
-  ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
-  ; CHECK: .size min_v2f64
 }
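
The bseli_* deltas above show the generic vselect fold in action on MSA: a select between a value and zero now reuses the sign-extended compare mask directly as an and.v operand instead of going through bsel.v. Since fclt.w produces all-ones or all-zeros per lane, and-ing the mask with the value is equivalent to the select. A minimal IR sketch of the shape that triggers it (function name hypothetical, taking direct arguments rather than loads; not part of the patch):

define <4 x float> @select_val_or_zero(<4 x float> %a, <4 x float> %b) {
  %cmp = fcmp ogt <4 x float> %a, %b
  %sel = select <4 x i1> %cmp, <4 x float> %a, <4 x float> zeroinitializer
  ret <4 x float> %sel
}
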
diff --git a/llvm/test/CodeGen/PowerPC/recipest.ll b/llvm/test/CodeGen/PowerPC/recipest.ll
index 96e2c6c3e081a..2598a410b8761 100644
--- a/llvm/test/CodeGen/PowerPC/recipest.ll
+++ b/llvm/test/CodeGen/PowerPC/recipest.ll
@@ -1024,15 +1024,16 @@ define <4 x float> @hoo3_fmf(<4 x float> %a) #1 {
 ; CHECK-P7-NEXT:    vslw 3, 3, 3
 ; CHECK-P7-NEXT:    lvx 0, 0, 3
 ; CHECK-P7-NEXT:    addis 3, 2, .LCPI25_1@toc@ha
-; CHECK-P7-NEXT:    addi 3, 3, .LCPI25_1@toc@l
 ; CHECK-P7-NEXT:    vmaddfp 5, 2, 4, 3
-; CHECK-P7-NEXT:    lvx 1, 0, 3
+; CHECK-P7-NEXT:    addi 3, 3, .LCPI25_1@toc@l
 ; CHECK-P7-NEXT:    vmaddfp 4, 5, 4, 0
-; CHECK-P7-NEXT:    vmaddfp 5, 5, 1, 3
+; CHECK-P7-NEXT:    lvx 0, 0, 3
+; CHECK-P7-NEXT:    vmaddfp 5, 5, 0, 3
 ; CHECK-P7-NEXT:    vmaddfp 3, 5, 4, 3
 ; CHECK-P7-NEXT:    vxor 4, 4, 4
 ; CHECK-P7-NEXT:    vcmpeqfp 2, 2, 4
-; CHECK-P7-NEXT:    vsel 2, 3, 4, 2
+; CHECK-P7-NEXT:    vnot 2, 2
+; CHECK-P7-NEXT:    vand 2, 2, 3
 ; CHECK-P7-NEXT:    blr
 ;
 ; CHECK-P8-LABEL: hoo3_fmf:
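
In hoo3_fmf above, the zero arm of the final select is now folded away: instead of a vsel against a materialized zero register, the result is masked with the complement of the compare mask (vnot plus vand). A reduced IR sketch of just that tail (function name hypothetical, the fast-math sqrt-estimate machinery omitted; not part of the patch):

define <4 x float> @zero_if_eq_zero(<4 x float> %x, <4 x float> %y) {
  %cmp = fcmp oeq <4 x float> %x, zeroinitializer
  %sel = select <4 x i1> %cmp, <4 x float> zeroinitializer, <4 x float> %y
  ret <4 x float> %sel
}
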
diff --git a/llvm/test/CodeGen/PowerPC/sat-add.ll b/llvm/test/CodeGen/PowerPC/sat-add.ll
index d9b22bda85e44..34b703a981105 100644
--- a/llvm/test/CodeGen/PowerPC/sat-add.ll
+++ b/llvm/test/CodeGen/PowerPC/sat-add.ll
@@ -536,12 +536,11 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_sum(<2 x i64> %x) {
 ; CHECK-LABEL: unsigned_sat_constant_v2i64_using_cmp_sum:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addis 3, 2, .LCPI34_0@toc@ha
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    addi 3, 3, .LCPI34_0@toc@l
 ; CHECK-NEXT:    lxvd2x 35, 0, 3
 ; CHECK-NEXT:    vaddudm 3, 2, 3
 ; CHECK-NEXT:    vcmpgtud 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %a = add <2 x i64> %x, <i64 42, i64 42>
   %c = icmp ugt <2 x i64> %x, %a
@@ -553,7 +552,6 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_notval(<2 x i64> %x) {
 ; CHECK-LABEL: unsigned_sat_constant_v2i64_using_cmp_notval:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addis 3, 2, .LCPI35_0@toc@ha
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    addi 3, 3, .LCPI35_0@toc@l
 ; CHECK-NEXT:    lxvd2x 35, 0, 3
 ; CHECK-NEXT:    addis 3, 2, .LCPI35_1@toc@ha
@@ -561,7 +559,7 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_notval(<2 x i64> %x) {
 ; CHECK-NEXT:    lxvd2x 36, 0, 3
 ; CHECK-NEXT:    vaddudm 3, 2, 3
 ; CHECK-NEXT:    vcmpgtud 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %a = add <2 x i64> %x, <i64 42, i64 42>
   %c = icmp ugt <2 x i64> %x, <i64 -43, i64 -43>
@@ -599,9 +597,8 @@ define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_notval(<16 x i8> %x, <16
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlnor 36, 35, 35
 ; CHECK-NEXT:    vaddubm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtub 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %noty = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %a = add <16 x i8> %x, %y
@@ -640,9 +637,8 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_notval(<8 x i16> %x, <8
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlnor 36, 35, 35
 ; CHECK-NEXT:    vadduhm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtuh 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %noty = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %a = add <8 x i16> %x, %y
@@ -681,9 +677,8 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_notval(<4 x i32> %x, <4
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlnor 36, 35, 35
 ; CHECK-NEXT:    vadduwm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtuw 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %noty = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
   %a = add <4 x i32> %x, %y
@@ -710,9 +705,8 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_sum(<2 x i64> %x, <2 x i
 ; CHECK-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddudm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtud 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %a = add <2 x i64> %x, %y
   %c = icmp ugt <2 x i64> %x, %a
@@ -725,9 +719,8 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_notval(<2 x i64> %x, <2
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xxlnor 36, 35, 35
 ; CHECK-NEXT:    vaddudm 3, 2, 3
-; CHECK-NEXT:    xxleqv 0, 0, 0
 ; CHECK-NEXT:    vcmpgtud 2, 2, 4
-; CHECK-NEXT:    xxsel 34, 35, 0, 34
+; CHECK-NEXT:    xxlor 34, 34, 35
 ; CHECK-NEXT:    blr
   %noty = xor <2 x i64> %y, <i64 -1, i64 -1>
   %a = add <2 x i64> %x, %y
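
The sat-add deltas all follow the same pattern: the saturating select(%c, -1, %a) previously materialized an all-ones vector with xxleqv and picked between it and the sum with xxsel; since the compare mask is itself all-ones in exactly the lanes that saturate, the select folds to a plain xxlor of the mask with the sum. A minimal sketch of the IR shape (function name hypothetical, mirroring unsigned_sat_constant_v2i64_using_cmp_sum above; not part of the patch):

define <2 x i64> @usat_add_42(<2 x i64> %x) {
  %a = add <2 x i64> %x, <i64 42, i64 42>
  %c = icmp ugt <2 x i64> %x, %a
  %r = select <2 x i1> %c, <2 x i64> <i64 -1, i64 -1>, <2 x i64> %a
  ret <2 x i64> %r
}
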
diff --git a/llvm/test/CodeGen/SystemZ/vec-max-min-zerosplat.ll b/llvm/test/CodeGen/SystemZ/vec-max-min-zerosplat.ll
index e8d4b2828c84b..12c7c729433d3 100644
--- a/llvm/test/CodeGen/SystemZ/vec-max-min-zerosplat.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-max-min-zerosplat.ll
@@ -1,12 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; Test vector maximum/minimum with a zero splat on z14.
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
 
 define <2 x double> @f1(<2 x double> %val) {
 ; CHECK-LABEL: f1:
-; CHECK: vgbm %v0, 0
-; CHECK-NEXT: vfmaxdb %v24, %v24, %v0, 4
-; CHECK-NEXT: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgbm %v0, 0
+; CHECK-NEXT:    vfmaxdb %v24, %v24, %v0, 4
+; CHECK-NEXT:    br %r14
   %cmp = fcmp ogt <2 x double> %val,  zeroinitializer
   %ret = select <2 x i1> %cmp, <2 x double> %val, <2 x double> zeroinitializer
   ret <2 x double> %ret
@@ -14,9 +16,10 @@ define <2 x double> @f1(<2 x double> %val) {
 
 define <2 x double> @f2(<2 x double> %val) {
 ; CHECK-LABEL: f2:
-; CHECK: vgbm %v0, 0
-; CHECK-NEXT: vfmindb %v24, %v24, %v0, 4
-; CHECK-NEXT: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgbm %v0, 0
+; CHECK-NEXT:    vfmindb %v24, %v24, %v0, 4
+; CHECK-NEXT:    br %r14
   %cmp = fcmp olt <2 x double> %val,  zeroinitializer
   %ret = select <2 x i1> %cmp, <2 x double> %val, <2 x double> zeroinitializer
   ret <2 x double> %ret
@@ -24,9 +27,10 @@ define <2 x double> @f2(<2 x double> %val) {
 
 define <4 x float> @f3(<4 x float> %val) {
 ; CHECK-LABEL: f3:
-; CHECK: vgbm %v0, 0
-; CHECK-NEXT: vfmaxsb %v24, %v24, %v0, 4
-; CHECK-NEXT: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgbm %v0, 0
+; CHECK-NEXT:    vfmaxsb %v24, %v24, %v0, 4
+; CHECK-NEXT:    br %r14
   %cmp = fcmp ogt <4 x float> %val,  zeroinitializer
   %ret = select <4 x i1> %cmp, <4 x float> %val, <4 x float> zeroinitializer
   ret <4 x float> %ret
@@ -34,9 +38,10 @@ define <4 x float> @f3(<4 x float> %val) {
 
 define <4 x float> @f4(<4 x float> %val) {
 ; CHECK-LABEL: f4:
-; CHECK: vgbm %v0, 0
-; CHECK-NEXT: vfminsb %v24, %v24, %v0, 4
-; CHECK-NEXT: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgbm %v0, 0
+; CHECK-NEXT:    vfminsb %v24, %v24, %v0, 4
+; CHECK-NEXT:    br %r14
   %cmp = fcmp olt <4 x float> %val,  zeroinitializer
   %ret = select <4 x i1> %cmp, <4 x float> %val, <4 x float> zeroinitializer
   ret <4 x float> %ret
@@ -44,10 +49,11 @@ define <4 x float> @f4(<4 x float> %val) {
 
 define <2 x double> @f5(<2 x double> %val) {
 ; CHECK-LABEL: f5:
-; CHECK: vgbm %v0, 0
-; CHECK-NEXT: vfchedb	%v1, %v0, %v24
-; CHECK-NEXT: vsel	%v24, %v0, %v24, %v1
-; CHECK-NEXT: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgbm %v0, 0
+; CHECK-NEXT:    vfchedb %v0, %v0, %v24
+; CHECK-NEXT:    vnc %v24, %v24, %v0
+; CHECK-NEXT:    br %r14
   %cmp = fcmp ugt <2 x double> %val,  zeroinitializer
   %ret = select <2 x i1> %cmp, <2 x double> %val, <2 x double> zeroinitializer
   ret <2 x double> %ret
@@ -55,10 +61,11 @@ define <2 x double> @f5(<2 x double> %val) {
 
 define <2 x double> @f6(<2 x double> %val) {
 ; CHECK-LABEL: f6:
-; CHECK: vgbm %v0, 0
-; CHECK-NEXT: vfchedb	%v1, %v24, %v0
-; CHECK-NEXT: vsel	%v24, %v0, %v24, %v1
-; CHECK-NEXT: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgbm %v0, 0
+; CHECK-NEXT:    vfchedb %v0, %v24, %v0
+; CHECK-NEXT:    vnc %v24, %v24, %v0
+; CHECK-NEXT:    br %r14
   %cmp = fcmp ult <2 x double> %val,  zeroinitializer
   %ret = select <2 x i1> %cmp, <2 x double> %val, <2 x double> zeroinitializer
   ret <2 x double> %ret
@@ -66,10 +73,11 @@ define <2 x double> @f6(<2 x double> %val) {
 
 define <4 x float> @f7(<4 x float> %val) {
 ; CHECK-LABEL: f7:
-; CHECK: vgbm %v0, 0
-; CHECK-NEXT: vfchesb	%v1, %v0, %v24
-; CHECK-NEXT: vsel	%v24, %v0, %v24, %v1
-; CHECK-NEXT: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgbm %v0, 0
+; CHECK-NEXT:    vfchesb %v0, %v0, %v24
+; CHECK-NEXT:    vnc %v24, %v24, %v0
+; CHECK-NEXT:    br %r14
   %cmp = fcmp ugt <4 x float> %val,  zeroinitializer
   %ret = select <4 x i1> %cmp, <4 x float> %val, <4 x float> zeroinitializer
   ret <4 x float> %ret
@@ -77,10 +85,11 @@ define <4 x float> @f7(<4 x float> %val) {
 
 define <4 x float> @f8(<4 x float> %val) {
 ; CHECK-LABEL: f8:
-; CHECK: vgbm %v0, 0
-; CHECK-NEXT: vfchesb	%v1, %v24, %v0
-; CHECK-NEXT: vsel	%v24, %v0, %v24, %v1
-; CHECK-NEXT: br %r14
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vgbm %v0, 0
+; CHECK-NEXT:    vfchesb %v0, %v24, %v0
+; CHECK-NEXT:    vnc %v24, %v24, %v0
+; CHECK-NEXT:    br %r14
   %cmp = fcmp ult <4 x float> %val,  zeroinitializer
   %ret = select <4 x i1> %cmp, <4 x float> %val, <4 x float> zeroinitializer
   ret <4 x float> %ret

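The SystemZ f5-f8 changes cover the zero-false-arm orientation: the unordered compares are lowered via an inverted vfchedb/vfchesb, so select(%cmp, %val, 0) becomes a select of zero under the inverted mask, which is then emitted as a single vnc (AND with complement) on the compare result; the vsel and its extra mask register disappear. A minimal IR sketch (matching f5 above; not part of the patch):

define <2 x double> @clamp_ugt_zero(<2 x double> %val) {
  %cmp = fcmp ugt <2 x double> %val, zeroinitializer
  %ret = select <2 x i1> %cmp, <2 x double> %val, <2 x double> zeroinitializer
  ret <2 x double> %ret
}
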

