[llvm-branch-commits] [llvm] 6a563ee - [ARM] Expand vXi1 VSELECT's

David Green via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Jan 19 10:01:23 PST 2021


Author: David Green
Date: 2021-01-19T17:56:50Z
New Revision: 6a563eef1321f742fa06482f4536cd41fb8e24c7

URL: https://github.com/llvm/llvm-project/commit/6a563eef1321f742fa06482f4536cd41fb8e24c7
DIFF: https://github.com/llvm/llvm-project/commit/6a563eef1321f742fa06482f4536cd41fb8e24c7.diff

LOG: [ARM] Expand vXi1 VSELECT's

We have no lowering for VSELECT vXi1, vXi1, vXi1, so mark them as
expanded to turn them into a series of logical operations.

Differential Revision: https://reviews.llvm.org/D94946

Added: 
    llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/test/Analysis/CostModel/ARM/arith-overflow.ll
    llvm/test/Analysis/CostModel/ARM/arith-ssat.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 46c5efa2cf2f8..aabfad045d9fa 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -444,6 +444,8 @@ void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
     setOperationAction(ISD::LOAD, VT, Custom);
     setOperationAction(ISD::STORE, VT, Custom);
     setOperationAction(ISD::TRUNCATE, VT, Custom);
+    setOperationAction(ISD::VSELECT, VT, Expand);
+    setOperationAction(ISD::SELECT, VT, Expand);
   }
 }
 

diff --git a/llvm/test/Analysis/CostModel/ARM/arith-overflow.ll b/llvm/test/Analysis/CostModel/ARM/arith-overflow.ll
index 172df86003569..0a29083f27f5c 100644
--- a/llvm/test/Analysis/CostModel/ARM/arith-overflow.ll
+++ b/llvm/test/Analysis/CostModel/ARM/arith-overflow.ll
@@ -68,20 +68,20 @@ define i32 @sadd(i32 %arg) {
 ; MVE-RECIP-LABEL: 'sadd'
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %I64 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 74 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 166 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 582 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 242 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.sadd.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 866 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.sadd.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %I32 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 38 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 90 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 306 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.sadd.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1122 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.sadd.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %I16 = call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 298 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.sadd.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1106 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.sadd.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 4260 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.sadd.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %I8 = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 28 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 56 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1098 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.sadd.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 4244 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.sadd.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 16680 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.sadd.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; V8M-SIZE-LABEL: 'sadd'
@@ -384,20 +384,20 @@ define i32 @ssub(i32 %arg) {
 ; MVE-RECIP-LABEL: 'ssub'
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %I64 = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 undef, i64 undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 74 for instruction: %V2I64 = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 166 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 582 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 242 for instruction: %V4I64 = call { <4 x i64>, <4 x i1> } @llvm.ssub.with.overflow.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 866 for instruction: %V8I64 = call { <8 x i64>, <8 x i1> } @llvm.ssub.with.overflow.v8i64(<8 x i64> undef, <8 x i64> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %I32 = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 undef, i32 undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 38 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 90 for instruction: %V4I32 = call { <4 x i32>, <4 x i1> } @llvm.ssub.with.overflow.v4i32(<4 x i32> undef, <4 x i32> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 306 for instruction: %V8I32 = call { <8 x i32>, <8 x i1> } @llvm.ssub.with.overflow.v8i32(<8 x i32> undef, <8 x i32> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1122 for instruction: %V16I32 = call { <16 x i32>, <16 x i1> } @llvm.ssub.with.overflow.v16i32(<16 x i32> undef, <16 x i32> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %I16 = call { i16, i1 } @llvm.ssub.with.overflow.i16(i16 undef, i16 undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 22 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 44 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 298 for instruction: %V8I16 = call { <8 x i16>, <8 x i1> } @llvm.ssub.with.overflow.v8i16(<8 x i16> undef, <8 x i16> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1106 for instruction: %V16I16 = call { <16 x i16>, <16 x i1> } @llvm.ssub.with.overflow.v16i16(<16 x i16> undef, <16 x i16> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 4260 for instruction: %V32I16 = call { <32 x i16>, <32 x i1> } @llvm.ssub.with.overflow.v32i16(<32 x i16> undef, <32 x i16> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %I8 = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 28 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 56 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1098 for instruction: %V16I8 = call { <16 x i8>, <16 x i1> } @llvm.ssub.with.overflow.v16i8(<16 x i8> undef, <16 x i8> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 4244 for instruction: %V32I8 = call { <32 x i8>, <32 x i1> } @llvm.ssub.with.overflow.v32i8(<32 x i8> undef, <32 x i8> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 16680 for instruction: %V64I8 = call { <64 x i8>, <64 x i1> } @llvm.ssub.with.overflow.v64i8(<64 x i8> undef, <64 x i8> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
 ; V8M-SIZE-LABEL: 'ssub'

diff --git a/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll b/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll
index bc8b23bc001fd..9cf5a9b8ab196 100644
--- a/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll
+++ b/llvm/test/Analysis/CostModel/ARM/arith-ssat.ll
@@ -86,8 +86,8 @@ define i32 @add(i32 %arg) {
 ; MVE-RECIP-LABEL: 'add'
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 13 for instruction: %I64 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 118 for instruction: %V2I64 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 302 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1046 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 378 for instruction: %V4I64 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1330 for instruction: %V8I64 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %I32 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 108 for instruction: %V2I32 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
@@ -292,8 +292,8 @@ define i32 @sub(i32 %arg) {
 ; MVE-RECIP-LABEL: 'sub'
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 13 for instruction: %I64 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 118 for instruction: %V2I64 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 302 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
-; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1046 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 378 for instruction: %V4I64 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
+; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 1330 for instruction: %V8I64 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 10 for instruction: %I32 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 108 for instruction: %V2I32 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
 ; MVE-RECIP-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %V4I32 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)

diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll b/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll
new file mode 100644
index 0000000000000..2223afa943958
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll
@@ -0,0 +1,497 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <4 x i32> @cmpeqz_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: cmpeqz_v4i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.i32 eq, q1, zr
+; CHECK-NEXT:    vcmpt.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vpt.i32 eq, q0, zr
+; CHECK-NEXT:    vcmpt.i32 eq, q2, zr
+; CHECK-NEXT:    vmrs r1, p0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp eq <4 x i32> %a, zeroinitializer
+  %c2 = icmp eq <4 x i32> %b, zeroinitializer
+  %c3 = icmp eq <4 x i32> %c, zeroinitializer
+  %c4 = select <4 x i1> %c3, <4 x i1> %c1, <4 x i1> %c2
+  %s = select <4 x i1> %c4, <4 x i32> %a, <4 x i32> %b
+  ret <4 x i32> %s
+}
+
+define arm_aapcs_vfpcc <8 x i16> @cmpeqz_v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: cmpeqz_v8i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.i16 eq, q1, zr
+; CHECK-NEXT:    vcmpt.i16 ne, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vpt.i16 eq, q0, zr
+; CHECK-NEXT:    vcmpt.i16 eq, q2, zr
+; CHECK-NEXT:    vmrs r1, p0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp eq <8 x i16> %a, zeroinitializer
+  %c2 = icmp eq <8 x i16> %b, zeroinitializer
+  %c3 = icmp eq <8 x i16> %c, zeroinitializer
+  %c4 = select <8 x i1> %c3, <8 x i1> %c1, <8 x i1> %c2
+  %s = select <8 x i1> %c4, <8 x i16> %a, <8 x i16> %b
+  ret <8 x i16> %s
+}
+
+define arm_aapcs_vfpcc <16 x i8> @cmpeqz_v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: cmpeqz_v16i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.i8 eq, q1, zr
+; CHECK-NEXT:    vcmpt.i8 ne, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vpt.i8 eq, q0, zr
+; CHECK-NEXT:    vcmpt.i8 eq, q2, zr
+; CHECK-NEXT:    vmrs r1, p0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp eq <16 x i8> %a, zeroinitializer
+  %c2 = icmp eq <16 x i8> %b, zeroinitializer
+  %c3 = icmp eq <16 x i8> %c, zeroinitializer
+  %c4 = select <16 x i1> %c3, <16 x i1> %c1, <16 x i1> %c2
+  %s = select <16 x i1> %c4, <16 x i8> %a, <16 x i8> %b
+  ret <16 x i8> %s
+}
+
+define arm_aapcs_vfpcc <2 x i64> @cmpeqz_v2i1(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
+; CHECK-LABEL: cmpeqz_v2i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    cset r1, eq
+; CHECK-NEXT:    tst.w r1, #1
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    cset r1, eq
+; CHECK-NEXT:    tst.w r1, #1
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
+; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vbic q3, q3, q2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    cset r1, eq
+; CHECK-NEXT:    tst.w r1, #1
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q4[2], q4[0], r1, r0
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r0
+; CHECK-NEXT:    vand q2, q4, q2
+; CHECK-NEXT:    vorr q2, q2, q3
+; CHECK-NEXT:    vbic q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp eq <2 x i64> %a, zeroinitializer
+  %c2 = icmp eq <2 x i64> %b, zeroinitializer
+  %c3 = icmp eq <2 x i64> %c, zeroinitializer
+  %c4 = select <2 x i1> %c3, <2 x i1> %c1, <2 x i1> %c2
+  %s = select <2 x i1> %c4, <2 x i64> %a, <2 x i64> %b
+  ret <2 x i64> %s
+}
+
+define arm_aapcs_vfpcc <4 x i32> @cmpnez_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: cmpnez_v4i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.i32 ne, q1, zr
+; CHECK-NEXT:    vcmpt.i32 eq, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vpt.i32 ne, q0, zr
+; CHECK-NEXT:    vcmpt.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r1, p0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp ne <4 x i32> %a, zeroinitializer
+  %c2 = icmp ne <4 x i32> %b, zeroinitializer
+  %c3 = icmp ne <4 x i32> %c, zeroinitializer
+  %c4 = select <4 x i1> %c3, <4 x i1> %c1, <4 x i1> %c2
+  %s = select <4 x i1> %c4, <4 x i32> %a, <4 x i32> %b
+  ret <4 x i32> %s
+}
+
+define arm_aapcs_vfpcc <8 x i16> @cmpnez_v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: cmpnez_v8i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.i16 ne, q1, zr
+; CHECK-NEXT:    vcmpt.i16 eq, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vpt.i16 ne, q0, zr
+; CHECK-NEXT:    vcmpt.i16 ne, q2, zr
+; CHECK-NEXT:    vmrs r1, p0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp ne <8 x i16> %a, zeroinitializer
+  %c2 = icmp ne <8 x i16> %b, zeroinitializer
+  %c3 = icmp ne <8 x i16> %c, zeroinitializer
+  %c4 = select <8 x i1> %c3, <8 x i1> %c1, <8 x i1> %c2
+  %s = select <8 x i1> %c4, <8 x i16> %a, <8 x i16> %b
+  ret <8 x i16> %s
+}
+
+define arm_aapcs_vfpcc <16 x i8> @cmpnez_v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: cmpnez_v16i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.i8 ne, q1, zr
+; CHECK-NEXT:    vcmpt.i8 eq, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vpt.i8 ne, q0, zr
+; CHECK-NEXT:    vcmpt.i8 ne, q2, zr
+; CHECK-NEXT:    vmrs r1, p0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp ne <16 x i8> %a, zeroinitializer
+  %c2 = icmp ne <16 x i8> %b, zeroinitializer
+  %c3 = icmp ne <16 x i8> %c, zeroinitializer
+  %c4 = select <16 x i1> %c3, <16 x i1> %c1, <16 x i1> %c2
+  %s = select <16 x i1> %c4, <16 x i8> %a, <16 x i8> %b
+  ret <16 x i8> %s
+}
+
+define arm_aapcs_vfpcc <2 x i64> @cmpnez_v2i1(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
+; CHECK-LABEL: cmpnez_v2i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    cset r0, ne
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    cset r1, ne
+; CHECK-NEXT:    tst.w r1, #1
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    cset r0, ne
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    cset r1, ne
+; CHECK-NEXT:    tst.w r1, #1
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
+; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vbic q3, q3, q2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    cset r0, ne
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    cset r1, ne
+; CHECK-NEXT:    tst.w r1, #1
+; CHECK-NEXT:    csetm r1, ne
+; CHECK-NEXT:    vmov q4[2], q4[0], r1, r0
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r0
+; CHECK-NEXT:    vand q2, q4, q2
+; CHECK-NEXT:    vorr q2, q2, q3
+; CHECK-NEXT:    vbic q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp ne <2 x i64> %a, zeroinitializer
+  %c2 = icmp ne <2 x i64> %b, zeroinitializer
+  %c3 = icmp ne <2 x i64> %c, zeroinitializer
+  %c4 = select <2 x i1> %c3, <2 x i1> %c1, <2 x i1> %c2
+  %s = select <2 x i1> %c4, <2 x i64> %a, <2 x i64> %b
+  ret <2 x i64> %s
+}
+
+
+
+define arm_aapcs_vfpcc <4 x i32> @cmpsltz_v4i1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: cmpsltz_v4i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.s32 lt, q1, zr
+; CHECK-NEXT:    vcmpt.s32 ge, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vpt.s32 lt, q0, zr
+; CHECK-NEXT:    vcmpt.s32 lt, q2, zr
+; CHECK-NEXT:    vmrs r1, p0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp slt <4 x i32> %a, zeroinitializer
+  %c2 = icmp slt <4 x i32> %b, zeroinitializer
+  %c3 = icmp slt <4 x i32> %c, zeroinitializer
+  %c4 = select <4 x i1> %c3, <4 x i1> %c1, <4 x i1> %c2
+  %s = select <4 x i1> %c4, <4 x i32> %a, <4 x i32> %b
+  ret <4 x i32> %s
+}
+
+define arm_aapcs_vfpcc <8 x i16> @cmpsltz_v8i1(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: cmpsltz_v8i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.s16 lt, q1, zr
+; CHECK-NEXT:    vcmpt.s16 ge, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vpt.s16 lt, q0, zr
+; CHECK-NEXT:    vcmpt.s16 lt, q2, zr
+; CHECK-NEXT:    vmrs r1, p0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp slt <8 x i16> %a, zeroinitializer
+  %c2 = icmp slt <8 x i16> %b, zeroinitializer
+  %c3 = icmp slt <8 x i16> %c, zeroinitializer
+  %c4 = select <8 x i1> %c3, <8 x i1> %c1, <8 x i1> %c2
+  %s = select <8 x i1> %c4, <8 x i16> %a, <8 x i16> %b
+  ret <8 x i16> %s
+}
+
+define arm_aapcs_vfpcc <16 x i8> @cmpsltz_v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: cmpsltz_v16i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vpt.s8 lt, q1, zr
+; CHECK-NEXT:    vcmpt.s8 ge, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vpt.s8 lt, q0, zr
+; CHECK-NEXT:    vcmpt.s8 lt, q2, zr
+; CHECK-NEXT:    vmrs r1, p0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp slt <16 x i8> %a, zeroinitializer
+  %c2 = icmp slt <16 x i8> %b, zeroinitializer
+  %c3 = icmp slt <16 x i8> %c, zeroinitializer
+  %c4 = select <16 x i1> %c3, <16 x i1> %c1, <16 x i1> %c2
+  %s = select <16 x i1> %c4, <16 x i8> %a, <16 x i8> %b
+  ret <16 x i8> %s
+}
+
+define arm_aapcs_vfpcc <2 x i64> @cmpsltz_v2i1(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
+; CHECK-LABEL: cmpsltz_v2i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vmov r1, s9
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
+; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vbic q3, q3, q2
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    vmov q4[2], q4[0], r1, r0
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r0
+; CHECK-NEXT:    vand q2, q4, q2
+; CHECK-NEXT:    vorr q2, q2, q3
+; CHECK-NEXT:    vbic q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp slt <2 x i64> %a, zeroinitializer
+  %c2 = icmp slt <2 x i64> %b, zeroinitializer
+  %c3 = icmp slt <2 x i64> %c, zeroinitializer
+  %c4 = select <2 x i1> %c3, <2 x i1> %c1, <2 x i1> %c2
+  %s = select <2 x i1> %c4, <2 x i64> %a, <2 x i64> %b
+  ret <2 x i64> %s
+}
+
+
+
+define arm_aapcs_vfpcc <4 x i32> @cmpeqz_v4i1_i1(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: cmpeqz_v4i1_i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    cbz r0, .LBB12_2
+; CHECK-NEXT:  @ %bb.1: @ %select.false
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:  .LBB12_2:
+; CHECK-NEXT:    vcmp.i32 eq, q0, zr
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp eq <4 x i32> %a, zeroinitializer
+  %c2 = icmp eq <4 x i32> %b, zeroinitializer
+  %c3 = icmp eq i32 %c, 0
+  %c4 = select i1 %c3, <4 x i1> %c1, <4 x i1> %c2
+  %s = select <4 x i1> %c4, <4 x i32> %a, <4 x i32> %b
+  ret <4 x i32> %s
+}
+
+define arm_aapcs_vfpcc <8 x i16> @cmpeqz_v8i1_i1(<8 x i16> %a, <8 x i16> %b, i16 %c) {
+; CHECK-LABEL: cmpeqz_v8i1_i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsls r0, r0, #16
+; CHECK-NEXT:    beq .LBB13_2
+; CHECK-NEXT:  @ %bb.1: @ %select.false
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:  .LBB13_2:
+; CHECK-NEXT:    vcmp.i16 eq, q0, zr
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp eq <8 x i16> %a, zeroinitializer
+  %c2 = icmp eq <8 x i16> %b, zeroinitializer
+  %c3 = icmp eq i16 %c, 0
+  %c4 = select i1 %c3, <8 x i1> %c1, <8 x i1> %c2
+  %s = select <8 x i1> %c4, <8 x i16> %a, <8 x i16> %b
+  ret <8 x i16> %s
+}
+
+define arm_aapcs_vfpcc <16 x i8> @cmpeqz_v16i1_i1(<16 x i8> %a, <16 x i8> %b, i8 %c) {
+; CHECK-LABEL: cmpeqz_v16i1_i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsls r0, r0, #24
+; CHECK-NEXT:    beq .LBB14_2
+; CHECK-NEXT:  @ %bb.1: @ %select.false
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:  .LBB14_2:
+; CHECK-NEXT:    vcmp.i8 eq, q0, zr
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %c1 = icmp eq <16 x i8> %a, zeroinitializer
+  %c2 = icmp eq <16 x i8> %b, zeroinitializer
+  %c3 = icmp eq i8 %c, 0
+  %c4 = select i1 %c3, <16 x i1> %c1, <16 x i1> %c2
+  %s = select <16 x i1> %c4, <16 x i8> %a, <16 x i8> %b
+  ret <16 x i8> %s
+}
+
+define arm_aapcs_vfpcc <2 x i64> @cmpeqz_v2i1_i1(<2 x i64> %a, <2 x i64> %b, i64 %c) {
+; CHECK-LABEL: cmpeqz_v2i1_i1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    vmov r2, s7
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    csetm r12, ne
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    csetm r4, ne
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    csetm lr, ne
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    beq .LBB15_2
+; CHECK-NEXT:  @ %bb.1: @ %select.false
+; CHECK-NEXT:    vmov q2[2], q2[0], r4, r12
+; CHECK-NEXT:    vmov q2[3], q2[1], r4, r12
+; CHECK-NEXT:    b .LBB15_3
+; CHECK-NEXT:  .LBB15_2:
+; CHECK-NEXT:    vmov q2[2], q2[0], r2, lr
+; CHECK-NEXT:    vmov q2[3], q2[1], r2, lr
+; CHECK-NEXT:  .LBB15_3: @ %select.end
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    and r0, r0, #1
+; CHECK-NEXT:    and r1, r1, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
+; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
+; CHECK-NEXT:    vbic q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %c1 = icmp eq <2 x i64> %a, zeroinitializer
+  %c2 = icmp eq <2 x i64> %b, zeroinitializer
+  %c3 = icmp eq i64 %c, zeroinitializer
+  %c4 = select i1 %c3, <2 x i1> %c1, <2 x i1> %c2
+  %s = select <2 x i1> %c4, <2 x i64> %a, <2 x i64> %b
+  ret <2 x i64> %s
+}


        


More information about the llvm-branch-commits mailing list