[llvm] r305646 - [x86] adjust test constants to maintain coverage; NFC
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Sun Jun 18 07:45:23 PDT 2017
Author: spatel
Date: Sun Jun 18 09:45:23 2017
New Revision: 305646
URL: http://llvm.org/viewvc/llvm-project?rev=305646&view=rev
Log:
[x86] adjust test constants to maintain coverage; NFC
An increment (add 1) could be transformed to a sub of -1, and we'd lose coverage for these patterns.
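For context, a minimal sketch (not part of this commit) of the canonicalization the log refers to: if a vector increment were rewritten as a subtract of all-ones, a test checking the add-with-broadcast lowering would stop exercising that pattern. Hypothetical before/after IR:

  ; before: increment by a splat of 1
  %g = add <8 x i32> %in, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>

  ; after a hypothetical add->sub rewrite: equivalent (x - (-1) == x + 1),
  ; but no longer an 'add' node, so the vpaddd broadcast check would not fire
  %g = sub <8 x i32> %in, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>

Using non-unit constants (2, 3, 4, ...) sidesteps any such rewrite, so the tests keep checking the broadcast-add codegen.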
Modified:
llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
llvm/trunk/test/CodeGen/X86/avx512-arith.ll
llvm/trunk/test/CodeGen/X86/avx512-logic.ll
llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll
Modified: llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll?rev=305646&r1=305645&r2=305646&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll Sun Jun 18 09:45:23 2017
@@ -653,7 +653,7 @@ define <8 x i32> @V111(<8 x i32> %in) no
; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
entry:
- %g = add <8 x i32> %in, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %g = add <8 x i32> %in, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
ret <8 x i32> %g
}
Modified: llvm/trunk/test/CodeGen/X86/avx512-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-arith.ll?rev=305646&r1=305645&r2=305646&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-arith.ll Sun Jun 18 09:45:23 2017
@@ -348,7 +348,7 @@ define <8 x i64> @vpaddq_broadcast_test(
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
- %x = add <8 x i64> %i, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %x = add <8 x i64> %i, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
ret <8 x i64> %x
}
@@ -394,7 +394,7 @@ define <16 x i32> @vpaddd_broadcast_test
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
- %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = add <16 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
ret <16 x i32> %x
}
@@ -446,7 +446,7 @@ define <16 x i32> @vpaddd_mask_broadcast
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = add <16 x i32> %i, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i
ret <16 x i32> %r
}
@@ -473,7 +473,7 @@ define <16 x i32> @vpaddd_maskz_broadcas
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = add <16 x i32> %i, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer
ret <16 x i32> %r
}
Modified: llvm/trunk/test/CodeGen/X86/avx512-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-logic.ll?rev=305646&r1=305645&r2=305646&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-logic.ll Sun Jun 18 09:45:23 2017
@@ -11,8 +11,8 @@ define <16 x i32> @vpandd(<16 x i32> %a,
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2,
+ i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%x = and <16 x i32> %a2, %b
ret <16 x i32> %x
}
@@ -25,8 +25,8 @@ define <16 x i32> @vpandnd(<16 x i32> %a
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3,
+ i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
%b2 = xor <16 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1,
i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%x = and <16 x i32> %a2, %b2
@@ -41,8 +41,8 @@ define <16 x i32> @vpord(<16 x i32> %a,
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4,
+ i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%x = or <16 x i32> %a2, %b
ret <16 x i32> %x
}
@@ -55,8 +55,8 @@ define <16 x i32> @vpxord(<16 x i32> %a,
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5,
+ i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%x = xor <16 x i32> %a2, %b
ret <16 x i32> %x
}
@@ -69,7 +69,7 @@ define <8 x i64> @vpandq(<8 x i64> %a, <
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 6, i64 6, i64 6, i64 6, i64 6, i64 6, i64 6, i64 6>
%x = and <8 x i64> %a2, %b
ret <8 x i64> %x
}
@@ -82,7 +82,7 @@ define <8 x i64> @vpandnq(<8 x i64> %a,
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
%b2 = xor <8 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
%x = and <8 x i64> %a2, %b2
ret <8 x i64> %x
@@ -96,7 +96,7 @@ define <8 x i64> @vporq(<8 x i64> %a, <8
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
%x = or <8 x i64> %a2, %b
ret <8 x i64> %x
}
@@ -109,7 +109,7 @@ define <8 x i64> @vpxorq(<8 x i64> %a, <
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 9, i64 9, i64 9, i64 9, i64 9, i64 9, i64 9, i64 9>
%x = xor <8 x i64> %a2, %b
ret <8 x i64> %x
}
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll?rev=305646&r1=305645&r2=305646&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-logic.ll Sun Jun 18 09:45:23 2017
@@ -12,7 +12,7 @@ define <8 x i32> @vpandd256(<8 x i32> %a
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%x = and <8 x i32> %a2, %b
ret <8 x i32> %x
}
@@ -25,7 +25,7 @@ define <8 x i32> @vpandnd256(<8 x i32> %
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
%b2 = xor <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%x = and <8 x i32> %a2, %b2
ret <8 x i32> %x
@@ -39,7 +39,7 @@ define <8 x i32> @vpord256(<8 x i32> %a,
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%x = or <8 x i32> %a2, %b
ret <8 x i32> %x
}
@@ -52,7 +52,7 @@ define <8 x i32> @vpxord256(<8 x i32> %a
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%x = xor <8 x i32> %a2, %b
ret <8 x i32> %x
}
@@ -65,7 +65,7 @@ define <4 x i64> @vpandq256(<4 x i64> %a
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 6, i64 6, i64 6, i64 6>
%x = and <4 x i64> %a2, %b
ret <4 x i64> %x
}
@@ -78,7 +78,7 @@ define <4 x i64> @vpandnq256(<4 x i64> %
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
%b2 = xor <4 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1>
%x = and <4 x i64> %a2, %b2
ret <4 x i64> %x
@@ -92,7 +92,7 @@ define <4 x i64> @vporq256(<4 x i64> %a,
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 21, i64 21, i64 21, i64 21>
%x = or <4 x i64> %a2, %b
ret <4 x i64> %x
}
@@ -105,7 +105,7 @@ define <4 x i64> @vpxorq256(<4 x i64> %a
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 22, i64 22, i64 22, i64 22>
%x = xor <4 x i64> %a2, %b
ret <4 x i64> %x
}
@@ -120,7 +120,7 @@ define <4 x i32> @vpandd128(<4 x i32> %a
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8>
%x = and <4 x i32> %a2, %b
ret <4 x i32> %x
}
@@ -133,7 +133,7 @@ define <4 x i32> @vpandnd128(<4 x i32> %
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
%b2 = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
%x = and <4 x i32> %a2, %b2
ret <4 x i32> %x
@@ -147,7 +147,7 @@ define <4 x i32> @vpord128(<4 x i32> %a,
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 10, i32 10, i32 10, i32 10>
%x = or <4 x i32> %a2, %b
ret <4 x i32> %x
}
@@ -160,7 +160,7 @@ define <4 x i32> @vpxord128(<4 x i32> %a
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 11, i32 11, i32 11, i32 11>
%x = xor <4 x i32> %a2, %b
ret <4 x i32> %x
}
@@ -173,7 +173,7 @@ define <2 x i64> @vpandq128(<2 x i64> %a
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 12, i64 12>
%x = and <2 x i64> %a2, %b
ret <2 x i64> %x
}
@@ -186,7 +186,7 @@ define <2 x i64> @vpandnq128(<2 x i64> %
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 13, i64 13>
%b2 = xor <2 x i64> %b, <i64 -1, i64 -1>
%x = and <2 x i64> %a2, %b2
ret <2 x i64> %x
@@ -200,7 +200,7 @@ define <2 x i64> @vporq128(<2 x i64> %a,
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 14, i64 14>
%x = or <2 x i64> %a2, %b
ret <2 x i64> %x
}
@@ -213,7 +213,7 @@ define <2 x i64> @vpxorq128(<2 x i64> %a
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 15, i64 15>
%x = xor <2 x i64> %a2, %b
ret <2 x i64> %x
}