[llvm] r324436 - [Mips][AMDGPU] Update test cases to not use vector lt/gt compares that can be simplified to an equality/inequality or to always true/false.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 6 16:51:37 PST 2018


Author: ctopper
Date: Tue Feb  6 16:51:37 2018
New Revision: 324436

URL: http://llvm.org/viewvc/llvm-project?rev=324436&view=rev
Log:
[Mips][AMDGPU] Update test cases to not use vector lt/gt compares that can be simplified to an equality/inequality or to always true/false.

For example 'ugt X, 0' can be simplified to 'ne X, 0'. Or 'uge X, 0' is always true.

We already simplify this for scalars in SimplifySetCC, but we don't currently do so for vectors in SimplifySetCC. D42948 proposes to change that.

Modified:
    llvm/trunk/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll
    llvm/trunk/test/CodeGen/AArch64/neon-compare-instructions.ll
    llvm/trunk/test/CodeGen/Mips/msa/compare.ll

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll?rev=324436&r1=324435&r2=324436&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-neon-compare-instructions.ll Tue Feb  6 16:51:37 2018
@@ -858,114 +858,116 @@ define <2 x i64> @cmneqz2xi64(<2 x i64>
 }
 
 define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
-;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK: movi v[[ZERO:[0-9]+]].8b, #2
 ;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
-	%tmp3 = icmp uge <8 x i8> %A, zeroinitializer;
+	%tmp3 = icmp uge <8 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
 }
 
 define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: movi v[[ZERO:[0-9]+]].16b, #2
 ;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
-	%tmp3 = icmp uge <16 x i8> %A, zeroinitializer;
+	%tmp3 = icmp uge <16 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
    %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
 }
 
 define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
-;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK: movi v[[ZERO:[0-9]+]].4h, #2
 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
-	%tmp3 = icmp uge <4 x i16> %A, zeroinitializer;
+	%tmp3 = icmp uge <4 x i16> %A, <i16 2, i16 2, i16 2, i16 2>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
 }
 
 define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: movi v[[ZERO:[0-9]+]].8h, #2
 ;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
-	%tmp3 = icmp uge <8 x i16> %A, zeroinitializer;
+	%tmp3 = icmp uge <8 x i16> %A, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
 }
 
 define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
-;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK: movi v[[ZERO:[0-9]+]].2s, #2
 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
-	%tmp3 = icmp uge <2 x i32> %A, zeroinitializer;
+	%tmp3 = icmp uge <2 x i32> %A, <i32 2, i32 2>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
 }
 
 define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: movi v[[ZERO:[0-9]+]].4s, #2
 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
-	%tmp3 = icmp uge <4 x i32> %A, zeroinitializer;
+	%tmp3 = icmp uge <4 x i32> %A, <i32 2, i32 2, i32 2, i32 2>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: orr w[[TWO:[0-9]+]], wzr, #0x2
+;CHECK-NEXT: dup v[[ZERO:[0-9]+]].2d, x[[TWO]]
 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
-	%tmp3 = icmp uge <2 x i64> %A, zeroinitializer;
+	%tmp3 = icmp uge <2 x i64> %A, <i64 2, i64 2>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
 	ret <2 x i64> %tmp4
 }
 
 
 define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
-;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK: movi v[[ZERO:[0-9]+]].8b, #1
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, v[[ZERO]].8b
-	%tmp3 = icmp ugt <8 x i8> %A, zeroinitializer;
+	%tmp3 = icmp ugt <8 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
 }
 
 define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: movi v[[ZERO:[0-9]+]].16b, #1
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, v[[ZERO]].16b
-	%tmp3 = icmp ugt <16 x i8> %A, zeroinitializer;
+	%tmp3 = icmp ugt <16 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
    %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
 }
 
 define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
-;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK: movi v[[ZERO:[0-9]+]].4h, #1
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, v[[ZERO]].4h
-	%tmp3 = icmp ugt <4 x i16> %A, zeroinitializer;
+	%tmp3 = icmp ugt <4 x i16> %A, <i16 1, i16 1, i16 1, i16 1>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
 }
 
 define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: movi v[[ZERO:[0-9]+]].8h, #1
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, v[[ZERO]].8h
-	%tmp3 = icmp ugt <8 x i16> %A, zeroinitializer;
+	%tmp3 = icmp ugt <8 x i16> %A, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
 }
 
 define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
-;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK: movi v[[ZERO:[0-9]+]].2s, #1
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, v[[ZERO]].2s
-	%tmp3 = icmp ugt <2 x i32> %A, zeroinitializer;
+	%tmp3 = icmp ugt <2 x i32> %A, <i32 1, i32 1>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
 }
 
 define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: movi v[[ZERO:[0-9]+]].4s, #1
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, v[[ZERO]].4s
-	%tmp3 = icmp ugt <4 x i32> %A, zeroinitializer;
+	%tmp3 = icmp ugt <4 x i32> %A, <i32 1, i32 1, i32 1, i32 1>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: orr w[[ONE:[0-9]+]], wzr, #0x1
+;CHECK-NEXT: dup v[[ZERO:[0-9]+]].2d, x[[ONE]]
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, v[[ZERO]].2d
-	%tmp3 = icmp ugt <2 x i64> %A, zeroinitializer;
+	%tmp3 = icmp ugt <2 x i64> %A, <i64 1, i64 1>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
 	ret <2 x i64> %tmp4
 }
@@ -1043,9 +1045,9 @@ define <2 x i64> @cmlsz2xi64(<2 x i64> %
 define <8 x i8> @cmloz8xi8(<8 x i8> %A) {
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK: movi v[[ZERO:[0-9]+]].8b, #2
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v[[ZERO]].8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ult <8 x i8> %A, zeroinitializer;
+	%tmp3 = icmp ult <8 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
 }
@@ -1053,9 +1055,9 @@ define <8 x i8> @cmloz8xi8(<8 x i8> %A)
 define <16 x i8> @cmloz16xi8(<16 x i8> %A) {
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: movi v[[ZERO:[0-9]+]].16b, #2
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v[[ZERO]].16b, v0.16b
-	%tmp3 = icmp ult <16 x i8> %A, zeroinitializer;
+	%tmp3 = icmp ult <16 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
    %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
 }
@@ -1063,9 +1065,9 @@ define <16 x i8> @cmloz16xi8(<16 x i8> %
 define <4 x i16> @cmloz4xi16(<4 x i16> %A) {
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK: movi v[[ZERO:[0-9]+]].4h, #2
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v[[ZERO]].4h, v0.4h
-	%tmp3 = icmp ult <4 x i16> %A, zeroinitializer;
+	%tmp3 = icmp ult <4 x i16> %A, <i16 2, i16 2, i16 2, i16 2>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
 }
@@ -1073,9 +1075,9 @@ define <4 x i16> @cmloz4xi16(<4 x i16> %
 define <8 x i16> @cmloz8xi16(<8 x i16> %A) {
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: movi v[[ZERO:[0-9]+]].8h, #2
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v[[ZERO]].8h, v0.8h
-	%tmp3 = icmp ult <8 x i16> %A, zeroinitializer;
+	%tmp3 = icmp ult <8 x i16> %A, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
 }
@@ -1083,9 +1085,9 @@ define <8 x i16> @cmloz8xi16(<8 x i16> %
 define <2 x i32> @cmloz2xi32(<2 x i32> %A) {
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-;CHECK: movi d[[ZERO:[0-9]+]], #0
+;CHECK: movi v[[ZERO:[0-9]+]].2s, #2
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v[[ZERO]].2s, v0.2s
-	%tmp3 = icmp ult <2 x i32> %A, zeroinitializer;
+	%tmp3 = icmp ult <2 x i32> %A, <i32 2, i32 2>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
 }
@@ -1093,9 +1095,9 @@ define <2 x i32> @cmloz2xi32(<2 x i32> %
 define <4 x i32> @cmloz4xi32(<4 x i32> %A) {
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: movi v[[ZERO:[0-9]+]].4s, #2
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v[[ZERO]].4s, v0.4s
-	%tmp3 = icmp ult <4 x i32> %A, zeroinitializer;
+	%tmp3 = icmp ult <4 x i32> %A, <i32 2, i32 2, i32 2, i32 2>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
 }
@@ -1103,9 +1105,10 @@ define <4 x i32> @cmloz4xi32(<4 x i32> %
 define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-;CHECK: movi v[[ZERO:[0-9]+]].2d, #0
+;CHECK: orr w[[TWO:[0-9]+]], wzr, #0x2
+;CHECK-NEXT: dup v[[ZERO:[0-9]+]].2d, x[[TWO]]
 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v[[ZERO]].2d, v0.2d
-	%tmp3 = icmp ult <2 x i64> %A, zeroinitializer;
+	%tmp3 = icmp ult <2 x i64> %A, <i64 2, i64 2>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
 	ret <2 x i64> %tmp4
 }

Modified: llvm/trunk/test/CodeGen/AArch64/neon-compare-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-compare-instructions.ll?rev=324436&r1=324435&r2=324436&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/neon-compare-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/neon-compare-instructions.ll Tue Feb  6 16:51:37 2018
@@ -1092,63 +1092,64 @@ define <2 x i64> @cmneqz2xi64(<2 x i64>
 
 define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
 ; CHECK-LABEL: cmhsz8xi8:
-; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.8b, #{{0x2|2}}
 ; CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp uge <8 x i8> %A, zeroinitializer;
+	%tmp3 = icmp uge <8 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
 }
 
 define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
 ; CHECK-LABEL: cmhsz16xi8:
-; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.16b, #{{0x2|2}}
 ; CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp uge <16 x i8> %A, zeroinitializer;
+	%tmp3 = icmp uge <16 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
    %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
 }
 
 define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
 ; CHECK-LABEL: cmhsz4xi16:
-; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.4h, #{{0x2|2}}
 ; CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-	%tmp3 = icmp uge <4 x i16> %A, zeroinitializer;
+	%tmp3 = icmp uge <4 x i16> %A, <i16 2, i16 2, i16 2, i16 2>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
 }
 
 define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
 ; CHECK-LABEL: cmhsz8xi16:
-; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.8h, #{{0x2|2}}
 ; CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-	%tmp3 = icmp uge <8 x i16> %A, zeroinitializer;
+	%tmp3 = icmp uge <8 x i16> %A, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
 }
 
 define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
 ; CHECK-LABEL: cmhsz2xi32:
-; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.2s, #{{0x2|2}}
 ; CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-	%tmp3 = icmp uge <2 x i32> %A, zeroinitializer;
+	%tmp3 = icmp uge <2 x i32> %A, <i32 2, i32 2>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
 }
 
 define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
 ; CHECK-LABEL: cmhsz4xi32:
-; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.4s, #{{0x2|2}}
 ; CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-	%tmp3 = icmp uge <4 x i32> %A, zeroinitializer;
+	%tmp3 = icmp uge <4 x i32> %A, <i32 2, i32 2, i32 2, i32 2>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
 ; CHECK-LABEL: cmhsz2xi64:
-; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK: orr w[[TWO:[0-9]+]], wzr, #0x2
+; CHECK-NEXT: dup {{v[0-9]+}}.2d, x[[TWO]]
 ; CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-	%tmp3 = icmp uge <2 x i64> %A, zeroinitializer;
+	%tmp3 = icmp uge <2 x i64> %A, <i64 2, i64 2>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
 	ret <2 x i64> %tmp4
 }
@@ -1156,63 +1157,64 @@ define <2 x i64> @cmhsz2xi64(<2 x i64> %
 
 define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
 ; CHECK-LABEL: cmhiz8xi8:
-; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.8b, #{{0x1|1}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ugt <8 x i8> %A, zeroinitializer;
+	%tmp3 = icmp ugt <8 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
 }
 
 define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
 ; CHECK-LABEL: cmhiz16xi8:
-; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.16b, #{{0x1|1}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
-	%tmp3 = icmp ugt <16 x i8> %A, zeroinitializer;
+	%tmp3 = icmp ugt <16 x i8> %A, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
    %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
 }
 
 define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
 ; CHECK-LABEL: cmhiz4xi16:
-; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.4h, #{{0x1|1}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
-	%tmp3 = icmp ugt <4 x i16> %A, zeroinitializer;
+	%tmp3 = icmp ugt <4 x i16> %A, <i16 1, i16 1, i16 1, i16 1>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
 }
 
 define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
 ; CHECK-LABEL: cmhiz8xi16:
-; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.8h, #{{0x1|1}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-	%tmp3 = icmp ugt <8 x i16> %A, zeroinitializer;
+	%tmp3 = icmp ugt <8 x i16> %A, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
 }
 
 define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
 ; CHECK-LABEL: cmhiz2xi32:
-; CHECK: movi {{v[0-9]+.8b|d[0-9]+}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.2s, #{{0x1|1}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
-	%tmp3 = icmp ugt <2 x i32> %A, zeroinitializer;
+	%tmp3 = icmp ugt <2 x i32> %A, <i32 1, i32 1>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
 }
 
 define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
 ; CHECK-LABEL: cmhiz4xi32:
-; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK: movi {{v[0-9]+}}.4s, #{{0x1|1}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-	%tmp3 = icmp ugt <4 x i32> %A, zeroinitializer;
+	%tmp3 = icmp ugt <4 x i32> %A, <i32 1, i32 1, i32 1, i32 1>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
 ; CHECK-LABEL: cmhiz2xi64:
-; CHECK: movi {{v[0-9]+.(16b|2d)}}, #{{0x0|0}}
+; CHECK: orr w[[ONE:[0-9]+]], wzr, #{{0x1|1}}
+; CHECK-NEXT: dup {{v[0-9]+}}.2d, x[[ONE]]
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
-	%tmp3 = icmp ugt <2 x i64> %A, zeroinitializer;
+	%tmp3 = icmp ugt <2 x i64> %A, <i64 1, i64 1>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
 	ret <2 x i64> %tmp4
 }
@@ -1298,9 +1300,9 @@ define <8 x i8> @cmloz8xi8(<8 x i8> %A)
 ; CHECK-LABEL: cmloz8xi8:
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-; CHECK: movi {{v1.8b|d1}}, #{{0x0|0}}
+; CHECK: movi v1.8b, #{{0x2|2}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v1.8b, {{v[0-9]+}}.8b
-	%tmp3 = icmp ult <8 x i8> %A, zeroinitializer;
+	%tmp3 = icmp ult <8 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
 	ret <8 x i8> %tmp4
 }
@@ -1309,9 +1311,9 @@ define <16 x i8> @cmloz16xi8(<16 x i8> %
 ; CHECK-LABEL: cmloz16xi8:
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK: movi v1.16b, #{{0x2|2}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
-	%tmp3 = icmp ult <16 x i8> %A, zeroinitializer;
+	%tmp3 = icmp ult <16 x i8> %A, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
    %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
 	ret <16 x i8> %tmp4
 }
@@ -1320,9 +1322,9 @@ define <4 x i16> @cmloz4xi16(<4 x i16> %
 ; CHECK-LABEL: cmloz4xi16:
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-; CHECK: movi {{v1.8b|d1}}, #{{0x0|0}}
+; CHECK: movi v1.4h, #{{0x2|2}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
-	%tmp3 = icmp ult <4 x i16> %A, zeroinitializer;
+	%tmp3 = icmp ult <4 x i16> %A, <i16 2, i16 2, i16 2, i16 2>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
 	ret <4 x i16> %tmp4
 }
@@ -1331,9 +1333,9 @@ define <8 x i16> @cmloz8xi16(<8 x i16> %
 ; CHECK-LABEL: cmloz8xi16:
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK: movi v1.8h, #{{0x2|2}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
-	%tmp3 = icmp ult <8 x i16> %A, zeroinitializer;
+	%tmp3 = icmp ult <8 x i16> %A, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
    %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
 	ret <8 x i16> %tmp4
 }
@@ -1342,9 +1344,9 @@ define <2 x i32> @cmloz2xi32(<2 x i32> %
 ; CHECK-LABEL: cmloz2xi32:
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-; CHECK: movi {{v1.8b|d1}}, #{{0x0|0}}
+; CHECK: movi v1.2s, #{{0x2|2}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
-	%tmp3 = icmp ult <2 x i32> %A, zeroinitializer;
+	%tmp3 = icmp ult <2 x i32> %A, <i32 2, i32 2>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
 	ret <2 x i32> %tmp4
 }
@@ -1353,9 +1355,9 @@ define <4 x i32> @cmloz4xi32(<4 x i32> %
 ; CHECK-LABEL: cmloz4xi32:
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK: movi v1.4s, #{{0x2|2}}
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
-	%tmp3 = icmp ult <4 x i32> %A, zeroinitializer;
+	%tmp3 = icmp ult <4 x i32> %A, <i32 2, i32 2, i32 2, i32 2>
    %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
 	ret <4 x i32> %tmp4
 }
@@ -1364,9 +1366,10 @@ define <2 x i64> @cmloz2xi64(<2 x i64> %
 ; CHECK-LABEL: cmloz2xi64:
 ; Using registers other than v0, v1 are possible, but would be odd.
 ; LO implemented as HI, so check reversed operands.
-; CHECK: movi {{v1.16b|v1.2d}}, #{{0x0|0}}
+; CHECK: orr w[[TWO:[0-9]+]], wzr, #{{0x2|2}}
+; CHECK-NEXT: dup v1.2d, x[[TWO]]
 ; CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
-	%tmp3 = icmp ult <2 x i64> %A, zeroinitializer;
+	%tmp3 = icmp ult <2 x i64> %A, <i64 2, i64 2>
    %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
 	ret <2 x i64> %tmp4
 }

Modified: llvm/trunk/test/CodeGen/Mips/msa/compare.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/msa/compare.ll?rev=324436&r1=324435&r2=324436&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/msa/compare.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/msa/compare.ll Tue Feb  6 16:51:37 2018
@@ -671,9 +671,9 @@ define void @clti_u_v16i8(<16 x i8>* %c,
 
   %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %2 = icmp ult <16 x i8> %1, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
   %3 = sext <16 x i1> %2 to <16 x i8>
-  ; CHECK-DAG: clti_u.b [[R3:\$w[0-9]+]], [[R1]], 1
+  ; CHECK-DAG: clti_u.b [[R3:\$w[0-9]+]], [[R1]], 2
   store <16 x i8> %3, <16 x i8>* %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
@@ -686,9 +686,9 @@ define void @clti_u_v8i16(<8 x i16>* %c,
 
   %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %2 = icmp ult <8 x i16> %1, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
   %3 = sext <8 x i1> %2 to <8 x i16>
-  ; CHECK-DAG: clti_u.h [[R3:\$w[0-9]+]], [[R1]], 1
+  ; CHECK-DAG: clti_u.h [[R3:\$w[0-9]+]], [[R1]], 2
   store <8 x i16> %3, <8 x i16>* %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
@@ -701,9 +701,9 @@ define void @clti_u_v4i32(<4 x i32>* %c,
 
   %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
+  %2 = icmp ult <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
   %3 = sext <4 x i1> %2 to <4 x i32>
-  ; CHECK-DAG: clti_u.w [[R3:\$w[0-9]+]], [[R1]], 1
+  ; CHECK-DAG: clti_u.w [[R3:\$w[0-9]+]], [[R1]], 2
   store <4 x i32> %3, <4 x i32>* %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
@@ -716,9 +716,9 @@ define void @clti_u_v2i64(<2 x i64>* %c,
 
   %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
+  %2 = icmp ult <2 x i64> %1, <i64 2, i64 2>
   %3 = sext <2 x i1> %2 to <2 x i64>
-  ; CHECK-DAG: clti_u.d [[R3:\$w[0-9]+]], [[R1]], 1
+  ; CHECK-DAG: clti_u.d [[R3:\$w[0-9]+]], [[R1]], 2
   store <2 x i64> %3, <2 x i64>* %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 




More information about the llvm-commits mailing list