[llvm] e3720bb - [AArch64] Extend and cleanup vector icmp test cases. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 7 10:39:38 PST 2023


Author: David Green
Date: 2023-12-07T18:39:33Z
New Revision: e3720bbc088d904ed7fad9ad1a4db294d2bcfc05

URL: https://github.com/llvm/llvm-project/commit/e3720bbc088d904ed7fad9ad1a4db294d2bcfc05
DIFF: https://github.com/llvm/llvm-project/commit/e3720bbc088d904ed7fad9ad1a4db294d2bcfc05.diff

LOG: [AArch64] Extend and cleanup vector icmp test cases. NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/neon-compare-instructions.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
index 0abdda5954f57..2d594947682b5 100644
--- a/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -7,7 +7,7 @@ define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmeq v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp eq <8 x i8> %A, %B;
+  %tmp3 = icmp eq <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -17,7 +17,7 @@ define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmeq v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp eq <16 x i8> %A, %B;
+  %tmp3 = icmp eq <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -27,7 +27,7 @@ define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmeq v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp eq <4 x i16> %A, %B;
+  %tmp3 = icmp eq <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -37,7 +37,7 @@ define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmeq v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp eq <8 x i16> %A, %B;
+  %tmp3 = icmp eq <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -47,7 +47,7 @@ define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmeq v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp eq <2 x i32> %A, %B;
+  %tmp3 = icmp eq <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -57,7 +57,7 @@ define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmeq v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp eq <4 x i32> %A, %B;
+  %tmp3 = icmp eq <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -67,7 +67,7 @@ define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmeq v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp eq <2 x i64> %A, %B;
+  %tmp3 = icmp eq <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -78,7 +78,7 @@ define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK-NEXT:    cmeq v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ne <8 x i8> %A, %B;
+  %tmp3 = icmp ne <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -89,7 +89,7 @@ define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK-NEXT:    cmeq v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    mvn v0.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ne <16 x i8> %A, %B;
+  %tmp3 = icmp ne <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -100,7 +100,7 @@ define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK-NEXT:    cmeq v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ne <4 x i16> %A, %B;
+  %tmp3 = icmp ne <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -111,7 +111,7 @@ define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK-NEXT:    cmeq v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    mvn v0.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ne <8 x i16> %A, %B;
+  %tmp3 = icmp ne <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -122,7 +122,7 @@ define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT:    cmeq v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ne <2 x i32> %A, %B;
+  %tmp3 = icmp ne <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -133,7 +133,7 @@ define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK-NEXT:    cmeq v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    mvn v0.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ne <4 x i32> %A, %B;
+  %tmp3 = icmp ne <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -144,7 +144,7 @@ define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK-NEXT:    cmeq v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    mvn v0.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ne <2 x i64> %A, %B;
+  %tmp3 = icmp ne <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -154,7 +154,7 @@ define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sgt <8 x i8> %A, %B;
+  %tmp3 = icmp sgt <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -164,7 +164,7 @@ define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sgt <16 x i8> %A, %B;
+  %tmp3 = icmp sgt <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -174,7 +174,7 @@ define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sgt <4 x i16> %A, %B;
+  %tmp3 = icmp sgt <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -184,7 +184,7 @@ define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sgt <8 x i16> %A, %B;
+  %tmp3 = icmp sgt <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -194,7 +194,7 @@ define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sgt <2 x i32> %A, %B;
+  %tmp3 = icmp sgt <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -204,7 +204,7 @@ define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sgt <4 x i32> %A, %B;
+  %tmp3 = icmp sgt <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -214,7 +214,7 @@ define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sgt <2 x i64> %A, %B;
+  %tmp3 = icmp sgt <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -225,7 +225,7 @@ define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp slt <8 x i8> %A, %B;
+  %tmp3 = icmp slt <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -236,7 +236,7 @@ define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp slt <16 x i8> %A, %B;
+  %tmp3 = icmp slt <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -247,7 +247,7 @@ define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp slt <4 x i16> %A, %B;
+  %tmp3 = icmp slt <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -258,7 +258,7 @@ define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.8h, v1.8h, v0.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp slt <8 x i16> %A, %B;
+  %tmp3 = icmp slt <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -269,7 +269,7 @@ define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.2s, v1.2s, v0.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp slt <2 x i32> %A, %B;
+  %tmp3 = icmp slt <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -280,7 +280,7 @@ define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp slt <4 x i32> %A, %B;
+  %tmp3 = icmp slt <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -291,7 +291,7 @@ define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmgt v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp slt <2 x i64> %A, %B;
+  %tmp3 = icmp slt <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -301,7 +301,7 @@ define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sge <8 x i8> %A, %B;
+  %tmp3 = icmp sge <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -311,7 +311,7 @@ define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sge <16 x i8> %A, %B;
+  %tmp3 = icmp sge <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -321,7 +321,7 @@ define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sge <4 x i16> %A, %B;
+  %tmp3 = icmp sge <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -331,7 +331,7 @@ define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sge <8 x i16> %A, %B;
+  %tmp3 = icmp sge <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -341,7 +341,7 @@ define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sge <2 x i32> %A, %B;
+  %tmp3 = icmp sge <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -351,7 +351,7 @@ define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sge <4 x i32> %A, %B;
+  %tmp3 = icmp sge <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -361,7 +361,7 @@ define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sge <2 x i64> %A, %B;
+  %tmp3 = icmp sge <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -372,7 +372,7 @@ define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sle <8 x i8> %A, %B;
+  %tmp3 = icmp sle <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -383,7 +383,7 @@ define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sle <16 x i8> %A, %B;
+  %tmp3 = icmp sle <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -394,7 +394,7 @@ define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sle <4 x i16> %A, %B;
+  %tmp3 = icmp sle <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -405,7 +405,7 @@ define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.8h, v1.8h, v0.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sle <8 x i16> %A, %B;
+  %tmp3 = icmp sle <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -416,7 +416,7 @@ define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.2s, v1.2s, v0.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sle <2 x i32> %A, %B;
+  %tmp3 = icmp sle <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -427,7 +427,7 @@ define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sle <4 x i32> %A, %B;
+  %tmp3 = icmp sle <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -438,7 +438,7 @@ define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmge v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp sle <2 x i64> %A, %B;
+  %tmp3 = icmp sle <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -448,7 +448,7 @@ define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ugt <8 x i8> %A, %B;
+  %tmp3 = icmp ugt <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -458,7 +458,7 @@ define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ugt <16 x i8> %A, %B;
+  %tmp3 = icmp ugt <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -468,7 +468,7 @@ define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ugt <4 x i16> %A, %B;
+  %tmp3 = icmp ugt <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -478,7 +478,7 @@ define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ugt <8 x i16> %A, %B;
+  %tmp3 = icmp ugt <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -488,7 +488,7 @@ define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ugt <2 x i32> %A, %B;
+  %tmp3 = icmp ugt <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -498,7 +498,7 @@ define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ugt <4 x i32> %A, %B;
+  %tmp3 = icmp ugt <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -508,7 +508,7 @@ define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ugt <2 x i64> %A, %B;
+  %tmp3 = icmp ugt <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -519,7 +519,7 @@ define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ult <8 x i8> %A, %B;
+  %tmp3 = icmp ult <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -530,7 +530,7 @@ define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ult <16 x i8> %A, %B;
+  %tmp3 = icmp ult <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -541,7 +541,7 @@ define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ult <4 x i16> %A, %B;
+  %tmp3 = icmp ult <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -552,7 +552,7 @@ define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.8h, v1.8h, v0.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ult <8 x i16> %A, %B;
+  %tmp3 = icmp ult <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -563,7 +563,7 @@ define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.2s, v1.2s, v0.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ult <2 x i32> %A, %B;
+  %tmp3 = icmp ult <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -574,7 +574,7 @@ define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ult <4 x i32> %A, %B;
+  %tmp3 = icmp ult <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -585,7 +585,7 @@ define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhi v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ult <2 x i64> %A, %B;
+  %tmp3 = icmp ult <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -595,7 +595,7 @@ define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp uge <8 x i8> %A, %B;
+  %tmp3 = icmp uge <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -605,7 +605,7 @@ define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp uge <16 x i8> %A, %B;
+  %tmp3 = icmp uge <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -615,7 +615,7 @@ define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp uge <4 x i16> %A, %B;
+  %tmp3 = icmp uge <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -625,7 +625,7 @@ define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.8h, v0.8h, v1.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp uge <8 x i16> %A, %B;
+  %tmp3 = icmp uge <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -635,7 +635,7 @@ define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp uge <2 x i32> %A, %B;
+  %tmp3 = icmp uge <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -645,7 +645,7 @@ define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp uge <4 x i32> %A, %B;
+  %tmp3 = icmp uge <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -655,7 +655,7 @@ define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp uge <2 x i64> %A, %B;
+  %tmp3 = icmp uge <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -666,7 +666,7 @@ define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <8 x i8> %A, %B;
+  %tmp3 = icmp ule <8 x i8> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -677,7 +677,7 @@ define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <16 x i8> %A, %B;
+  %tmp3 = icmp ule <16 x i8> %A, %B
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -688,7 +688,7 @@ define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <4 x i16> %A, %B;
+  %tmp3 = icmp ule <4 x i16> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -699,7 +699,7 @@ define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.8h, v1.8h, v0.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <8 x i16> %A, %B;
+  %tmp3 = icmp ule <8 x i16> %A, %B
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -710,7 +710,7 @@ define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.2s, v1.2s, v0.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <2 x i32> %A, %B;
+  %tmp3 = icmp ule <2 x i32> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -721,7 +721,7 @@ define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <4 x i32> %A, %B;
+  %tmp3 = icmp ule <4 x i32> %A, %B
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -732,7 +732,7 @@ define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmhs v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <2 x i64> %A, %B;
+  %tmp3 = icmp ule <2 x i64> %A, %B
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -883,7 +883,7 @@ define <8 x i8> @cmeqz8xi8(<8 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmeq v0.8b, v0.8b, v1.8b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp eq <8 x i8> %A, zeroinitializer;
+  %tmp3 = icmp eq <8 x i8> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -899,7 +899,7 @@ define <16 x i8> @cmeqz16xi8(<16 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmeq v0.16b, v0.16b, v1.16b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp eq <16 x i8> %A, zeroinitializer;
+  %tmp3 = icmp eq <16 x i8> %A, zeroinitializer
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -915,7 +915,7 @@ define <4 x i16> @cmeqz4xi16(<4 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmeq v0.4h, v0.4h, v1.4h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp eq <4 x i16> %A, zeroinitializer;
+  %tmp3 = icmp eq <4 x i16> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -931,7 +931,7 @@ define <8 x i16> @cmeqz8xi16(<8 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmeq v0.8h, v0.8h, v1.8h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp eq <8 x i16> %A, zeroinitializer;
+  %tmp3 = icmp eq <8 x i16> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -947,7 +947,7 @@ define <2 x i32> @cmeqz2xi32(<2 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmeq v0.2s, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp eq <2 x i32> %A, zeroinitializer;
+  %tmp3 = icmp eq <2 x i32> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -963,7 +963,7 @@ define <4 x i32> @cmeqz4xi32(<4 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmeq v0.4s, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp eq <4 x i32> %A, zeroinitializer;
+  %tmp3 = icmp eq <4 x i32> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -979,7 +979,7 @@ define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmeq v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp eq <2 x i64> %A, zeroinitializer;
+  %tmp3 = icmp eq <2 x i64> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -996,7 +996,7 @@ define <8 x i8> @cmgez8xi8(<8 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.8b, v0.8b, v1.8b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sge <8 x i8> %A, zeroinitializer;
+  %tmp3 = icmp sge <8 x i8> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -1012,7 +1012,7 @@ define <16 x i8> @cmgez16xi8(<16 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.16b, v0.16b, v1.16b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sge <16 x i8> %A, zeroinitializer;
+  %tmp3 = icmp sge <16 x i8> %A, zeroinitializer
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -1028,7 +1028,7 @@ define <4 x i16> @cmgez4xi16(<4 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.4h, v0.4h, v1.4h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sge <4 x i16> %A, zeroinitializer;
+  %tmp3 = icmp sge <4 x i16> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -1044,7 +1044,7 @@ define <8 x i16> @cmgez8xi16(<8 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.8h, v0.8h, v1.8h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sge <8 x i16> %A, zeroinitializer;
+  %tmp3 = icmp sge <8 x i16> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -1060,7 +1060,7 @@ define <2 x i32> @cmgez2xi32(<2 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.2s, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sge <2 x i32> %A, zeroinitializer;
+  %tmp3 = icmp sge <2 x i32> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -1076,7 +1076,7 @@ define <4 x i32> @cmgez4xi32(<4 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.4s, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sge <4 x i32> %A, zeroinitializer;
+  %tmp3 = icmp sge <4 x i32> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -1092,7 +1092,7 @@ define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sge <2 x i64> %A, zeroinitializer;
+  %tmp3 = icmp sge <2 x i64> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -1210,6 +1210,118 @@ define <2 x i64> @cmgez2xi64_alt(<2 x i64> %A) {
   ret <2 x i64> %not
 }
 
+define <8 x i8> @cmgez8xi8_alt2(<8 x i8> %A) {
+; CHECK-SD-LABEL: cmgez8xi8_alt2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmge v0.8b, v0.8b, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmgez8xi8_alt2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi d1, #0xffffffffffffffff
+; CHECK-GI-NEXT:    cmgt v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp sgt <8 x i8> %A, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+  ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmgez16xi8_alt2(<16 x i8> %A) {
+; CHECK-SD-LABEL: cmgez16xi8_alt2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmge v0.16b, v0.16b, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmgez16xi8_alt2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT:    cmgt v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp sgt <16 x i8> %A, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+  ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmgez4xi16_alt2(<4 x i16> %A) {
+; CHECK-SD-LABEL: cmgez4xi16_alt2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmge v0.4h, v0.4h, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmgez4xi16_alt2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi d1, #0xffffffffffffffff
+; CHECK-GI-NEXT:    cmgt v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp sgt <4 x i16> %A, <i16 -1, i16 -1, i16 -1, i16 -1>
+  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+  ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmgez8xi16_alt2(<8 x i16> %A) {
+; CHECK-SD-LABEL: cmgez8xi16_alt2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmge v0.8h, v0.8h, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmgez8xi16_alt2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT:    cmgt v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp sgt <8 x i16> %A, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+  ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmgez2xi32_alt2(<2 x i32> %A) {
+; CHECK-SD-LABEL: cmgez2xi32_alt2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmge v0.2s, v0.2s, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmgez2xi32_alt2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi d1, #0xffffffffffffffff
+; CHECK-GI-NEXT:    cmgt v0.2s, v0.2s, v1.2s
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp sgt <2 x i32> %A, <i32 -1, i32 -1>
+  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+  ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmgez4xi32_alt2(<4 x i32> %A) {
+; CHECK-SD-LABEL: cmgez4xi32_alt2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmge v0.4s, v0.4s, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmgez4xi32_alt2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT:    cmgt v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp sgt <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+  ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmgez2xi64_alt2(<2 x i64> %A) {
+; CHECK-SD-LABEL: cmgez2xi64_alt2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmge v0.2d, v0.2d, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmgez2xi64_alt2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2d, #0xffffffffffffffff
+; CHECK-GI-NEXT:    cmgt v0.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp sgt <2 x i64> %A, <i64 -1, i64 -1>
+  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+  ret <2 x i64> %tmp4
+}
+
 
 define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
 ; CHECK-SD-LABEL: cmgtz8xi8:
@@ -1222,7 +1334,7 @@ define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.8b, v0.8b, v1.8b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sgt <8 x i8> %A, zeroinitializer;
+  %tmp3 = icmp sgt <8 x i8> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -1238,7 +1350,7 @@ define <16 x i8> @cmgtz16xi8(<16 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.16b, v0.16b, v1.16b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sgt <16 x i8> %A, zeroinitializer;
+  %tmp3 = icmp sgt <16 x i8> %A, zeroinitializer
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -1254,7 +1366,7 @@ define <4 x i16> @cmgtz4xi16(<4 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.4h, v0.4h, v1.4h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sgt <4 x i16> %A, zeroinitializer;
+  %tmp3 = icmp sgt <4 x i16> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -1270,7 +1382,7 @@ define <8 x i16> @cmgtz8xi16(<8 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.8h, v0.8h, v1.8h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sgt <8 x i16> %A, zeroinitializer;
+  %tmp3 = icmp sgt <8 x i16> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -1286,7 +1398,7 @@ define <2 x i32> @cmgtz2xi32(<2 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.2s, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sgt <2 x i32> %A, zeroinitializer;
+  %tmp3 = icmp sgt <2 x i32> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -1302,7 +1414,7 @@ define <4 x i32> @cmgtz4xi32(<4 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.4s, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sgt <4 x i32> %A, zeroinitializer;
+  %tmp3 = icmp sgt <4 x i32> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -1318,7 +1430,7 @@ define <2 x i64> @cmgtz2xi64(<2 x i64> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sgt <2 x i64> %A, zeroinitializer;
+  %tmp3 = icmp sgt <2 x i64> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -1334,7 +1446,7 @@ define <8 x i8> @cmlez8xi8(<8 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.8b, v1.8b, v0.8b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sle <8 x i8> %A, zeroinitializer;
+  %tmp3 = icmp sle <8 x i8> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -1350,7 +1462,7 @@ define <16 x i8> @cmlez16xi8(<16 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.16b, v1.16b, v0.16b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sle <16 x i8> %A, zeroinitializer;
+  %tmp3 = icmp sle <16 x i8> %A, zeroinitializer
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -1366,7 +1478,7 @@ define <4 x i16> @cmlez4xi16(<4 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.4h, v1.4h, v0.4h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sle <4 x i16> %A, zeroinitializer;
+  %tmp3 = icmp sle <4 x i16> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -1382,7 +1494,7 @@ define <8 x i16> @cmlez8xi16(<8 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.8h, v1.8h, v0.8h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sle <8 x i16> %A, zeroinitializer;
+  %tmp3 = icmp sle <8 x i16> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -1398,7 +1510,7 @@ define <2 x i32> @cmlez2xi32(<2 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.2s, v1.2s, v0.2s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sle <2 x i32> %A, zeroinitializer;
+  %tmp3 = icmp sle <2 x i32> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -1414,7 +1526,7 @@ define <4 x i32> @cmlez4xi32(<4 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.4s, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sle <4 x i32> %A, zeroinitializer;
+  %tmp3 = icmp sle <4 x i32> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -1430,7 +1542,120 @@ define <2 x i64> @cmlez2xi64(<2 x i64> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmge v0.2d, v1.2d, v0.2d
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp sle <2 x i64> %A, zeroinitializer;
+  %tmp3 = icmp sle <2 x i64> %A, zeroinitializer
+  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+  ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmlez8xi8_alt(<8 x i8> %A) {
+; CHECK-SD-LABEL: cmlez8xi8_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmle v0.8b, v0.8b, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmlez8xi8_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.8b, #1
+; CHECK-GI-NEXT:    cmgt v0.8b, v1.8b, v0.8b
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp slt <8 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+  ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmlez16xi8_alt(<16 x i8> %A) {
+; CHECK-SD-LABEL: cmlez16xi8_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmle v0.16b, v0.16b, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmlez16xi8_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.16b, #1
+; CHECK-GI-NEXT:    cmgt v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp slt <16 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+  ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmlez4xi16_alt(<4 x i16> %A) {
+; CHECK-SD-LABEL: cmlez4xi16_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmle v0.4h, v0.4h, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmlez4xi16_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.4h, #1
+; CHECK-GI-NEXT:    cmgt v0.4h, v1.4h, v0.4h
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp slt <4 x i16> %A, <i16 1, i16 1, i16 1, i16 1>
+  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+  ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmlez8xi16_alt(<8 x i16> %A) {
+; CHECK-SD-LABEL: cmlez8xi16_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmle v0.8h, v0.8h, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmlez8xi16_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.8h, #1
+; CHECK-GI-NEXT:    cmgt v0.8h, v1.8h, v0.8h
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp slt <8 x i16> %A, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+  ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmlez2xi32_alt(<2 x i32> %A) {
+; CHECK-SD-LABEL: cmlez2xi32_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmle v0.2s, v0.2s, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmlez2xi32_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #1
+; CHECK-GI-NEXT:    cmgt v0.2s, v1.2s, v0.2s
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp slt <2 x i32> %A, <i32 1, i32 1>
+  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+  ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmlez4xi32_alt(<4 x i32> %A) {
+; CHECK-SD-LABEL: cmlez4xi32_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmle v0.4s, v0.4s, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmlez4xi32_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.4s, #1
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp slt <4 x i32> %A, <i32 1, i32 1, i32 1, i32 1>
+  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+  ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmlez2xi64_alt(<2 x i64> %A) {
+; CHECK-SD-LABEL: cmlez2xi64_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmle v0.2d, v0.2d, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmlez2xi64_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    adrp x8, .LCPI125_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI125_0]
+; CHECK-GI-NEXT:    cmgt v0.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    ret
+  %tmp3 = icmp slt <2 x i64> %A, <i64 1, i64 1>
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -1446,7 +1671,7 @@ define <8 x i8> @cmltz8xi8(<8 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.8b, v1.8b, v0.8b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp slt <8 x i8> %A, zeroinitializer;
+  %tmp3 = icmp slt <8 x i8> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -1462,7 +1687,7 @@ define <16 x i8> @cmltz16xi8(<16 x i8> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.16b, v1.16b, v0.16b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp slt <16 x i8> %A, zeroinitializer;
+  %tmp3 = icmp slt <16 x i8> %A, zeroinitializer
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -1478,7 +1703,7 @@ define <4 x i16> @cmltz4xi16(<4 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.4h, v1.4h, v0.4h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp slt <4 x i16> %A, zeroinitializer;
+  %tmp3 = icmp slt <4 x i16> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -1494,7 +1719,7 @@ define <8 x i16> @cmltz8xi16(<8 x i16> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.8h, v1.8h, v0.8h
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp slt <8 x i16> %A, zeroinitializer;
+  %tmp3 = icmp slt <8 x i16> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -1510,7 +1735,7 @@ define <2 x i32> @cmltz2xi32(<2 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.2s, v1.2s, v0.2s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp slt <2 x i32> %A, zeroinitializer;
+  %tmp3 = icmp slt <2 x i32> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -1526,7 +1751,7 @@ define <4 x i32> @cmltz4xi32(<4 x i32> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp slt <4 x i32> %A, zeroinitializer;
+  %tmp3 = icmp slt <4 x i32> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -1542,11 +1767,109 @@ define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
 ; CHECK-GI-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-GI-NEXT:    cmgt v0.2d, v1.2d, v0.2d
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp slt <2 x i64> %A, zeroinitializer;
+  %tmp3 = icmp slt <2 x i64> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
 
+define <8 x i8> @cmltz8xi8_alt(<8 x i8> %A) {
+; CHECK-SD-LABEL: cmltz8xi8_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmlt v0.8b, v0.8b, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmltz8xi8_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    sshr v0.8b, v0.8b, #7
+; CHECK-GI-NEXT:    ret
+  %A.lobit = ashr <8 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  ret <8 x i8> %A.lobit
+}
+
+define <16 x i8> @cmltz16xi8_alt(<16 x i8> %A) {
+; CHECK-SD-LABEL: cmltz16xi8_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmltz16xi8_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    sshr v0.16b, v0.16b, #7
+; CHECK-GI-NEXT:    ret
+  %A.lobit = ashr <16 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  ret <16 x i8> %A.lobit
+}
+
+define <4 x i16> @cmltz4xi16_alt(<4 x i16> %A) {
+; CHECK-SD-LABEL: cmltz4xi16_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmlt v0.4h, v0.4h, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmltz4xi16_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    sshr v0.4h, v0.4h, #15
+; CHECK-GI-NEXT:    ret
+  %A.lobit = ashr <4 x i16> %A, <i16 15, i16 15, i16 15, i16 15>
+  ret <4 x i16> %A.lobit
+}
+
+define <8 x i16> @cmltz8xi16_alt(<8 x i16> %A) {
+; CHECK-SD-LABEL: cmltz8xi16_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmlt v0.8h, v0.8h, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmltz8xi16_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    sshr v0.8h, v0.8h, #15
+; CHECK-GI-NEXT:    ret
+  %A.lobit = ashr <8 x i16> %A, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  ret <8 x i16> %A.lobit
+}
+
+define <2 x i32> @cmltz2xi32_alt(<2 x i32> %A) {
+; CHECK-SD-LABEL: cmltz2xi32_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmlt v0.2s, v0.2s, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmltz2xi32_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    sshr v0.2s, v0.2s, #31
+; CHECK-GI-NEXT:    ret
+  %A.lobit = ashr <2 x i32> %A, <i32 31, i32 31>
+  ret <2 x i32> %A.lobit
+}
+
+define <4 x i32> @cmltz4xi32_alt(<4 x i32> %A) {
+; CHECK-SD-LABEL: cmltz4xi32_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmltz4xi32_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    sshr v0.4s, v0.4s, #31
+; CHECK-GI-NEXT:    ret
+  %A.lobit = ashr <4 x i32> %A, <i32 31, i32 31, i32 31, i32 31>
+  ret <4 x i32> %A.lobit
+}
+
+define <2 x i64> @cmltz2xi64_alt(<2 x i64> %A) {
+; CHECK-SD-LABEL: cmltz2xi64_alt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmlt v0.2d, v0.2d, #0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: cmltz2xi64_alt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    sshr v0.2d, v0.2d, #63
+; CHECK-GI-NEXT:    ret
+  %A.lobit = ashr <2 x i64> %A, <i64 63, i64 63>
+  ret <2 x i64> %A.lobit
+}
+
 define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
 ; CHECK-SD-LABEL: cmneqz8xi8:
 ; CHECK-SD:       // %bb.0:
@@ -1559,7 +1882,7 @@ define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
 ; CHECK-GI-NEXT:    cmeq v0.8b, v0.8b, v1.8b
 ; CHECK-GI-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
+  %tmp3 = icmp ne <8 x i8> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -1576,7 +1899,7 @@ define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
 ; CHECK-GI-NEXT:    cmeq v0.16b, v0.16b, v1.16b
 ; CHECK-GI-NEXT:    mvn v0.16b, v0.16b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
+  %tmp3 = icmp ne <16 x i8> %A, zeroinitializer
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -1593,7 +1916,7 @@ define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
 ; CHECK-GI-NEXT:    cmeq v0.4h, v0.4h, v1.4h
 ; CHECK-GI-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
+  %tmp3 = icmp ne <4 x i16> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -1610,7 +1933,7 @@ define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
 ; CHECK-GI-NEXT:    cmeq v0.8h, v0.8h, v1.8h
 ; CHECK-GI-NEXT:    mvn v0.16b, v0.16b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
+  %tmp3 = icmp ne <8 x i16> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -1627,7 +1950,7 @@ define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
 ; CHECK-GI-NEXT:    cmeq v0.2s, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    mvn v0.8b, v0.8b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
+  %tmp3 = icmp ne <2 x i32> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -1644,7 +1967,7 @@ define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
 ; CHECK-GI-NEXT:    cmeq v0.4s, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    mvn v0.16b, v0.16b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
+  %tmp3 = icmp ne <4 x i32> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -1661,7 +1984,7 @@ define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
 ; CHECK-GI-NEXT:    cmeq v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    mvn v0.16b, v0.16b
 ; CHECK-GI-NEXT:    ret
-  %tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
+  %tmp3 = icmp ne <2 x i64> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -1742,16 +2065,10 @@ define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
 ;
 ; CHECK-GI-LABEL: cmhsz2xi64:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    adrp x8, .LCPI132_0
-; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI132_0]
+; CHECK-GI-NEXT:    adrp x8, .LCPI153_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI153_0]
 ; CHECK-GI-NEXT:    cmhs v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ret
-; GISEL-LABEL: cmhsz2xi64:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    adrp x8, .LCPI132_0
-; GISEL-NEXT:    ldr q1, [x8, :lo12:.LCPI132_0]
-; GISEL-NEXT:    cmhs v0.2d, v0.2d, v1.2d
-; GISEL-NEXT:    ret
   %tmp3 = icmp uge <2 x i64> %A, <i64 2, i64 2>
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
@@ -1834,16 +2151,10 @@ define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
 ;
 ; CHECK-GI-LABEL: cmhiz2xi64:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    adrp x8, .LCPI139_0
-; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI139_0]
+; CHECK-GI-NEXT:    adrp x8, .LCPI160_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI160_0]
 ; CHECK-GI-NEXT:    cmhi v0.2d, v0.2d, v1.2d
 ; CHECK-GI-NEXT:    ret
-; GISEL-LABEL: cmhiz2xi64:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    adrp x8, .LCPI139_0
-; GISEL-NEXT:    ldr q1, [x8, :lo12:.LCPI139_0]
-; GISEL-NEXT:    cmhi v0.2d, v0.2d, v1.2d
-; GISEL-NEXT:    ret
   %tmp3 = icmp ugt <2 x i64> %A, <i64 1, i64 1>
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
@@ -1856,7 +2167,7 @@ define <8 x i8> @cmlsz8xi8(<8 x i8> %A) {
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cmhs v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <8 x i8> %A, zeroinitializer;
+  %tmp3 = icmp ule <8 x i8> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
   ret <8 x i8> %tmp4
 }
@@ -1868,7 +2179,7 @@ define <16 x i8> @cmlsz16xi8(<16 x i8> %A) {
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cmhs v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <16 x i8> %A, zeroinitializer;
+  %tmp3 = icmp ule <16 x i8> %A, zeroinitializer
   %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
   ret <16 x i8> %tmp4
 }
@@ -1880,7 +2191,7 @@ define <4 x i16> @cmlsz4xi16(<4 x i16> %A) {
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cmhs v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <4 x i16> %A, zeroinitializer;
+  %tmp3 = icmp ule <4 x i16> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
   ret <4 x i16> %tmp4
 }
@@ -1892,7 +2203,7 @@ define <8 x i16> @cmlsz8xi16(<8 x i16> %A) {
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cmhs v0.8h, v1.8h, v0.8h
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <8 x i16> %A, zeroinitializer;
+  %tmp3 = icmp ule <8 x i16> %A, zeroinitializer
   %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
   ret <8 x i16> %tmp4
 }
@@ -1904,7 +2215,7 @@ define <2 x i32> @cmlsz2xi32(<2 x i32> %A) {
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cmhs v0.2s, v1.2s, v0.2s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <2 x i32> %A, zeroinitializer;
+  %tmp3 = icmp ule <2 x i32> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
   ret <2 x i32> %tmp4
 }
@@ -1916,7 +2227,7 @@ define <4 x i32> @cmlsz4xi32(<4 x i32> %A) {
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cmhs v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <4 x i32> %A, zeroinitializer;
+  %tmp3 = icmp ule <4 x i32> %A, zeroinitializer
   %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4
 }
@@ -1928,7 +2239,7 @@ define <2 x i64> @cmlsz2xi64(<2 x i64> %A) {
 ; CHECK-NEXT:    movi v1.2d, #0000000000000000
 ; CHECK-NEXT:    cmhs v0.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    ret
-  %tmp3 = icmp ule <2 x i64> %A, zeroinitializer;
+  %tmp3 = icmp ule <2 x i64> %A, zeroinitializer
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
 }
@@ -2016,16 +2327,10 @@ define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
 ;
 ; CHECK-GI-LABEL: cmloz2xi64:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    adrp x8, .LCPI153_0
-; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI153_0]
+; CHECK-GI-NEXT:    adrp x8, .LCPI174_0
+; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI174_0]
 ; CHECK-GI-NEXT:    cmhi v0.2d, v1.2d, v0.2d
 ; CHECK-GI-NEXT:    ret
-; GISEL-LABEL: cmloz2xi64:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    adrp x8, .LCPI153_0
-; GISEL-NEXT:    ldr q1, [x8, :lo12:.LCPI153_0]
-; GISEL-NEXT:    cmhi v0.2d, v1.2d, v0.2d
-; GISEL-NEXT:    ret
   %tmp3 = icmp ult <2 x i64> %A, <i64 2, i64 2>
   %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
   ret <2 x i64> %tmp4
@@ -4175,13 +4480,6 @@ define <4 x i32> @fcmule4xfloat_fast_zext(<4 x float> %A, <4 x float> %B) {
 ; CHECK-GI-NEXT:    fcmgt v0.4s, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    bic v0.16b, v2.16b, v0.16b
 ; CHECK-GI-NEXT:    ret
-; GISEL-LABEL: fcmule4xfloat_fast_zext:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    fcmgt v0.4s, v0.4s, v1.4s
-; GISEL-NEXT:    adrp x8, .LCPI322_0
-; GISEL-NEXT:    ldr q1, [x8, :lo12:.LCPI322_0]
-; GISEL-NEXT:    bic v0.16b, v1.16b, v0.16b
-; GISEL-NEXT:    ret
   %tmp3 = fcmp fast ule <4 x float> %A, %B
   %tmp4 = zext <4 x i1> %tmp3 to <4 x i32>
   ret <4 x i32> %tmp4


        


More information about the llvm-commits mailing list