[llvm] 3f17da1 - [X86] Regenerate test checks with vpternlog comments

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 17 06:19:06 PDT 2024


Author: Simon Pilgrim
Date: 2024-10-17T14:18:49+01:00
New Revision: 3f17da1f45dfcafebff1ef7fba031eae86ce1720

URL: https://github.com/llvm/llvm-project/commit/3f17da1f45dfcafebff1ef7fba031eae86ce1720
DIFF: https://github.com/llvm/llvm-project/commit/3f17da1f45dfcafebff1ef7fba031eae86ce1720.diff

LOG: [X86] Regenerate test checks with vpternlog comments

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-or-shuffle.ll
    llvm/test/CodeGen/X86/psubus.ll
    llvm/test/CodeGen/X86/sat-add.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-or-shuffle.ll b/llvm/test/CodeGen/X86/combine-or-shuffle.ll
index 175d21a4f7067c..55b1cdeddb853e 100644
--- a/llvm/test/CodeGen/X86/combine-or-shuffle.ll
+++ b/llvm/test/CodeGen/X86/combine-or-shuffle.ll
@@ -808,7 +808,7 @@ define <2 x i64> @or_and_v2i64(<2 x i64> %a0) {
 ; AVX512-LABEL: or_and_v2i64:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [7,7]
-; AVX512-NEXT:    vpternlogq $200, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm0
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm0 = xmm1 & (xmm0 | mem)
 ; AVX512-NEXT:    retq
   %1 = and <2 x i64> %a0, <i64 7, i64 7>
   %2 = or <2 x i64> %1, <i64 3, i64 3>
@@ -837,7 +837,7 @@ define <4 x i32> @or_and_v4i32(<4 x i32> %a0) {
 ; AVX512-LABEL: or_and_v4i32:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmovsxbd {{.*#+}} xmm1 = [3,3,15,7]
-; AVX512-NEXT:    vpternlogd $200, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512-NEXT:    vpternlogd {{.*#+}} xmm0 = xmm1 & (xmm0 | mem)
 ; AVX512-NEXT:    retq
   %1 = and <4 x i32> %a0, <i32 1, i32 3, i32 5, i32 7>
   %2 = or <4 x i32> %1, <i32 3, i32 2, i32 15, i32 2>

diff --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll
index 4d220c43dc478f..be8adf697d5c1b 100644
--- a/llvm/test/CodeGen/X86/psubus.ll
+++ b/llvm/test/CodeGen/X86/psubus.ll
@@ -146,7 +146,7 @@ define <4 x i32> @ashr_xor_and_custom(<4 x i32> %x) nounwind {
 ; AVX512-LABEL: ashr_xor_and_custom:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsrad $31, %xmm0, %xmm1
-; AVX512-NEXT:    vpternlogd $72, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512-NEXT:    vpternlogd {{.*#+}} xmm0 = xmm1 & (xmm0 ^ mem)
 ; AVX512-NEXT:    retq
   %signsplat = ashr <4 x i32> %x, <i32 undef, i32 31, i32 31, i32 31>
   %flipsign = xor <4 x i32> %x, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
@@ -187,7 +187,7 @@ define <4 x i32> @ashr_add_and_custom(<4 x i32> %x) nounwind {
 ; AVX512-LABEL: ashr_add_and_custom:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsrad $31, %xmm0, %xmm1
-; AVX512-NEXT:    vpternlogd $72, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512-NEXT:    vpternlogd {{.*#+}} xmm0 = xmm1 & (xmm0 ^ mem)
 ; AVX512-NEXT:    retq
   %signsplat = ashr <4 x i32> %x, <i32 undef, i32 31, i32 31, i32 31>
   %flipsign = add <4 x i32> %x, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
@@ -230,7 +230,7 @@ define <4 x i32> @usubsat_custom(<4 x i32> %x) nounwind {
 ; AVX512-LABEL: usubsat_custom:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpsrad $31, %xmm0, %xmm1
-; AVX512-NEXT:    vpternlogd $72, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512-NEXT:    vpternlogd {{.*#+}} xmm0 = xmm1 & (xmm0 ^ mem)
 ; AVX512-NEXT:    retq
   %res = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> <i32 2147483648, i32 2147483648, i32 2147483648, i32 undef>)
   ret <4 x i32> %res

diff --git a/llvm/test/CodeGen/X86/sat-add.ll b/llvm/test/CodeGen/X86/sat-add.ll
index f78b57d895ee18..949902a5ebc47c 100644
--- a/llvm/test/CodeGen/X86/sat-add.ll
+++ b/llvm/test/CodeGen/X86/sat-add.ll
@@ -820,7 +820,7 @@ define <16 x i8> @unsigned_sat_variable_v16i8_using_min(<16 x i8> %x, <16 x i8>
 ; AVX512-LABEL: unsigned_sat_variable_v16i8_using_min:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm2 = ~xmm2
 ; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
@@ -875,10 +875,10 @@ define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_notval(<16 x i8> %x, <16
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm3
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm1 = ~xmm1
 ; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm1
 ; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm3, %xmm0
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm0 = xmm3 | (xmm0 ^ xmm2)
 ; AVX512-NEXT:    retq
   %noty = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %a = add <16 x i8> %x, %y
@@ -917,7 +917,7 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_min(<8 x i16> %x, <8 x i16>
 ; AVX512-LABEL: unsigned_sat_variable_v8i16_using_min:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm2 = ~xmm2
 ; AVX512-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
@@ -982,10 +982,10 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_notval(<8 x i16> %x, <8
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm3
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm1 = ~xmm1
 ; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm1
 ; AVX512-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm3, %xmm0
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm0 = xmm3 | (xmm0 ^ xmm2)
 ; AVX512-NEXT:    retq
   %noty = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %a = add <8 x i16> %x, %y
@@ -1029,7 +1029,7 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_min(<4 x i32> %x, <4 x i32>
 ; AVX512-LABEL: unsigned_sat_variable_v4i32_using_min:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm2 = ~xmm2
 ; AVX512-NEXT:    vpminud %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
@@ -1070,7 +1070,7 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_sum(<4 x i32> %x, <4 x i
 ; AVX512-LABEL: unsigned_sat_variable_v4i32_using_cmp_sum:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm2 = ~xmm2
 ; AVX512-NEXT:    vpminud %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
@@ -1117,7 +1117,7 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_notval(<4 x i32> %x, <4
 ; AVX512-LABEL: unsigned_sat_variable_v4i32_using_cmp_notval:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa %xmm1, %xmm3
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm3
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm3 = ~xmm3
 ; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpleud %xmm3, %xmm0, %k1
 ; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm2 {%k1}
@@ -1202,7 +1202,7 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_min(<2 x i64> %x, <2 x i64>
 ; AVX512-LABEL: unsigned_sat_variable_v2i64_using_min:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm2 = ~xmm2
 ; AVX512-NEXT:    vpminuq %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
@@ -1272,7 +1272,7 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_sum(<2 x i64> %x, <2 x i
 ; AVX512-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm2 = ~xmm2
 ; AVX512-NEXT:    vpminuq %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
@@ -1339,7 +1339,7 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_notval(<2 x i64> %x, <2
 ; AVX512-LABEL: unsigned_sat_variable_v2i64_using_cmp_notval:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovdqa %xmm1, %xmm3
-; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm3
+; AVX512-NEXT:    vpternlogq {{.*#+}} xmm3 = ~xmm3
 ; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpcmpleuq %xmm3, %xmm0, %k1
 ; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm2 {%k1}


        


More information about the llvm-commits mailing list