[llvm] bb754f2 - [X86] avx512 intrinsics tests - regenerate VPTERNLOG comments

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 22 09:24:04 PST 2025


Author: Simon Pilgrim
Date: 2025-01-22T17:23:37Z
New Revision: bb754f2c98ddeeadf75f21e6fbc6bd03898f008c

URL: https://github.com/llvm/llvm-project/commit/bb754f2c98ddeeadf75f21e6fbc6bd03898f008c
DIFF: https://github.com/llvm/llvm-project/commit/bb754f2c98ddeeadf75f21e6fbc6bd03898f008c.diff

LOG: [X86] avx512 intrinsics tests - regenerate VPTERNLOG comments
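
For readers unfamiliar with the encoding: the regenerated {{.*#+}} asm comments pretty-print the VPTERNLOG imm8 truth table as a boolean expression. Below is a minimal Python sketch (illustrative only, not part of this commit), assuming the standard VPTERNLOG convention that for operand bits (A, B, C) = (dst/src1, src2, src3) the result bit is bit ((A << 2) | (B << 1) | C) of the immediate:

  def ternlog_bit(imm8, a, b, c):
      """VPTERNLOG output bit for input bits a, b, c (each 0 or 1)."""
      return (imm8 >> ((a << 2) | (b << 1) | c)) & 1

  bits = (0, 1)
  # imm8 = 4 is true only at index 0b010 (a=0, b=1, c=0),
  # i.e. b & ~(a | c) -- printed as "zmm1 & ~(zmm0 | zmm2)" below.
  assert all(ternlog_bit(4, a, b, c) == (b & ~a & ~c & 1)
             for a in bits for b in bits for c in bits)
  # imm8 = 33 (0b00100001) is true at indices 0b000 and 0b101,
  # i.e. ~(b | (a ^ c)) -- printed as "~(zmm1 | (zmm0 ^ zmm2))".
  assert all(ternlog_bit(33, a, b, c) == (~(b | (a ^ c)) & 1)
             for a in bits for b in bits for c in bits)
  # imm8 = 255 is true for every input: the all-ones idiom, printed as "= -1".
  assert all(ternlog_bit(255, a, b, c) == 1
             for a in bits for b in bits for c in bits)

In AT&T syntax (e.g. "vpternlogd $4, %zmm2, %zmm1, %zmm0") the operand order is reversed, so zmm0 is dst/src1, zmm1 is src2, and zmm2 is src3 in the expressions above.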

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
    llvm/test/CodeGen/X86/avx512-intrinsics.ll
    llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index 1ca870add95b5d..a8574c0b7516c1 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -2218,7 +2218,7 @@ declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64>, <8 x i16>, i8)
 define <8 x i64> @test_mm512_ternarylogic_epi32(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C) {
 ; CHECK-LABEL: test_mm512_ternarylogic_epi32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpternlogd $4, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpternlogd {{.*#+}} zmm0 = zmm1 & ~(zmm0 | zmm2)
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
@@ -2236,13 +2236,13 @@ define <8 x i64> @test_mm512_mask_ternarylogic_epi32(<8 x i64> %__A, i16 zeroext
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} = zmm1 & ~(zmm0 | zmm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_ternarylogic_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} = zmm1 & ~(zmm0 | zmm2)
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
@@ -2260,13 +2260,13 @@ define <8 x i64> @test_mm512_maskz_ternarylogic_epi32(i16 zeroext %__U, <8 x i64
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} {z} = zmm1 & ~(zmm0 | zmm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_ternarylogic_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $4, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} {z} = zmm1 & ~(zmm0 | zmm2)
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <8 x i64> %__A to <16 x i32>
@@ -2282,7 +2282,7 @@ entry:
 define <8 x i64> @test_mm512_ternarylogic_epi64(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C) {
 ; CHECK-LABEL: test_mm512_ternarylogic_epi64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpternlogq $4, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm1 & ~(zmm0 | zmm2)
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C, i32 4)
@@ -2296,13 +2296,13 @@ define <8 x i64> @test_mm512_mask_ternarylogic_epi64(<8 x i64> %__A, i8 zeroext
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogq $4, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} = zmm1 & ~(zmm0 | zmm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_mask_ternarylogic_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogq $4, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} = zmm1 & ~(zmm0 | zmm2)
 ; X64-NEXT:    retq
 entry:
   %0 = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C, i32 4)
@@ -2316,13 +2316,13 @@ define <8 x i64> @test_mm512_maskz_ternarylogic_epi64(i8 zeroext %__U, <8 x i64>
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogq $4, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} {z} = zmm1 & ~(zmm0 | zmm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_maskz_ternarylogic_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogq $4, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} {z} = zmm1 & ~(zmm0 | zmm2)
 ; X64-NEXT:    retq
 entry:
   %0 = tail call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %__A, <8 x i64> %__B, <8 x i64> %__C, i32 4)
@@ -6864,7 +6864,7 @@ define i64 @test_mm512_mask_reduce_and_epi64(i8 zeroext %__M, <8 x i64> %__W) {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X86-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
 ; X86-NEXT:    vpand %ymm0, %ymm1, %ymm0
@@ -6880,7 +6880,7 @@ define i64 @test_mm512_mask_reduce_and_epi64(i8 zeroext %__M, <8 x i64> %__W) {
 ; X64-LABEL: test_mm512_mask_reduce_and_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X64-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
 ; X64-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; X64-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
 ; X64-NEXT:    vpand %ymm0, %ymm1, %ymm0
@@ -7200,7 +7200,7 @@ define i32 @test_mm512_mask_reduce_and_epi32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X86-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
 ; X86-NEXT:    vpand %ymm0, %ymm1, %ymm0
@@ -7217,7 +7217,7 @@ define i32 @test_mm512_mask_reduce_and_epi32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X64-LABEL: test_mm512_mask_reduce_and_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X64-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
 ; X64-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; X64-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
 ; X64-NEXT:    vpand %ymm0, %ymm1, %ymm0
@@ -8176,7 +8176,7 @@ define i64 @test_mm512_mask_reduce_min_epu64(i8 zeroext %__M, <8 x i64> %__W) {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X86-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
 ; X86-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[4,5,6,7,0,1,2,3]
 ; X86-NEXT:    vpminuq %zmm0, %zmm1, %zmm0
@@ -8192,7 +8192,7 @@ define i64 @test_mm512_mask_reduce_min_epu64(i8 zeroext %__M, <8 x i64> %__W) {
 ; X64-LABEL: test_mm512_mask_reduce_min_epu64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X64-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
 ; X64-NEXT:    vmovdqa64 %zmm0, %zmm1 {%k1}
 ; X64-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[4,5,6,7,0,1,2,3]
 ; X64-NEXT:    vpminuq %zmm0, %zmm1, %zmm0
@@ -8778,7 +8778,7 @@ define i32 @test_mm512_mask_reduce_min_epu32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X86-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
 ; X86-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; X86-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
 ; X86-NEXT:    vpminud %ymm0, %ymm1, %ymm0
@@ -8795,7 +8795,7 @@ define i32 @test_mm512_mask_reduce_min_epu32(i16 zeroext %__M, <8 x i64> %__W) {
 ; X64-LABEL: test_mm512_mask_reduce_min_epu32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1
+; X64-NEXT:    vpternlogd {{.*#+}} zmm1 = -1
 ; X64-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; X64-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
 ; X64-NEXT:    vpminud %ymm0, %ymm1, %ymm0

diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index b77c753107a6e1..926af4e9957afb 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -5008,7 +5008,7 @@ declare <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x
 define <16 x i32>@test_int_x86_avx512_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_pternlog_d_512:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpternlogd {{.*#+}} zmm0 = ~(zmm1 | (zmm0 ^ zmm2))
 ; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33)
   ret <16 x i32> %1
@@ -5018,13 +5018,13 @@ define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x
 ; X64-LABEL: test_int_x86_avx512_mask_pternlog_d_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} = ~(zmm1 | (zmm0 ^ zmm2))
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_int_x86_avx512_mask_pternlog_d_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} = ~(zmm1 | (zmm0 ^ zmm2))
 ; X86-NEXT:    retl
   %1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33)
   %2 = bitcast i16 %x4 to <16 x i1>
@@ -5036,13 +5036,13 @@ define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x
 ; X64-LABEL: test_int_x86_avx512_maskz_pternlog_d_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} {z} = ~(zmm1 | (zmm0 ^ zmm2))
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_int_x86_avx512_maskz_pternlog_d_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
-; X86-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vpternlogd {{.*#+}} zmm0 {%k1} {z} = ~(zmm1 | (zmm0 ^ zmm2))
 ; X86-NEXT:    retl
   %1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33)
   %2 = bitcast i16 %x4 to <16 x i1>
@@ -5055,7 +5055,7 @@ declare <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64>, <8 x i64>, <8 x i64
 define <8 x i64>@test_int_x86_avx512_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2) {
 ; CHECK-LABEL: test_int_x86_avx512_pternlog_q_512:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpternlogq {{.*#+}} zmm0 = ~(zmm1 | (zmm0 ^ zmm2))
 ; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33)
   ret <8 x i64> %1
@@ -5065,14 +5065,14 @@ define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64
 ; X64-LABEL: test_int_x86_avx512_mask_pternlog_q_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} = ~(zmm1 | (zmm0 ^ zmm2))
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_int_x86_avx512_mask_pternlog_q_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} = ~(zmm1 | (zmm0 ^ zmm2))
 ; X86-NEXT:    retl
   %1 = call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33)
   %2 = bitcast i8 %x4 to <8 x i1>
@@ -5084,14 +5084,14 @@ define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i6
 ; X64-LABEL: test_int_x86_avx512_maskz_pternlog_q_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} {z} = ~(zmm1 | (zmm0 ^ zmm2))
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_int_x86_avx512_maskz_pternlog_q_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vpternlogq {{.*#+}} zmm0 {%k1} {z} = ~(zmm1 | (zmm0 ^ zmm2))
 ; X86-NEXT:    retl
   %1 = call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33)
   %2 = bitcast i8 %x4 to <8 x i1>

diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
index 87799c1e82fed3..1a60644b2fc228 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
@@ -3457,7 +3457,7 @@ entry:
 define <2 x i64> @test_mm_ternarylogic_epi32(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> %__C) {
 ; CHECK-LABEL: test_mm_ternarylogic_epi32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpternlogd $4, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vpternlogd {{.*#+}} xmm0 = xmm1 & ~(xmm0 | xmm2)
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
@@ -3475,13 +3475,13 @@ define <2 x i64> @test_mm_mask_ternarylogic_epi32(<2 x i64> %__A, i8 zeroext %__
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $4, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vpternlogd {{.*#+}} xmm0 {%k1} = xmm1 & ~(xmm0 | xmm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_ternarylogic_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $4, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vpternlogd {{.*#+}} xmm0 {%k1} = xmm1 & ~(xmm0 | xmm2)
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
@@ -3500,13 +3500,13 @@ define <2 x i64> @test_mm_maskz_ternarylogic_epi32(i8 zeroext %__U, <2 x i64> %_
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $4, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    vpternlogd {{.*#+}} xmm0 {%k1} {z} = xmm1 & ~(xmm0 | xmm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_ternarylogic_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $4, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    vpternlogd {{.*#+}} xmm0 {%k1} {z} = xmm1 & ~(xmm0 | xmm2)
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <2 x i64> %__A to <4 x i32>
@@ -3523,7 +3523,7 @@ entry:
 define <4 x i64> @test_mm256_ternarylogic_epi32(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> %__C) {
 ; CHECK-LABEL: test_mm256_ternarylogic_epi32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpternlogd $4, %ymm2, %ymm1, %ymm0
+; CHECK-NEXT:    vpternlogd {{.*#+}} ymm0 = ymm1 & ~(ymm0 | ymm2)
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
@@ -3541,13 +3541,13 @@ define <4 x i64> @test_mm256_mask_ternarylogic_epi32(<4 x i64> %__A, i8 zeroext
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $4, %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT:    vpternlogd {{.*#+}} ymm0 {%k1} = ymm1 & ~(ymm0 | ymm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mask_ternarylogic_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $4, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    vpternlogd {{.*#+}} ymm0 {%k1} = ymm1 & ~(ymm0 | ymm2)
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
@@ -3565,13 +3565,13 @@ define <4 x i64> @test_mm256_maskz_ternarylogic_epi32(i8 zeroext %__U, <4 x i64>
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogd $4, %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X86-NEXT:    vpternlogd {{.*#+}} ymm0 {%k1} {z} = ymm1 & ~(ymm0 | ymm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskz_ternarylogic_epi32:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogd $4, %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT:    vpternlogd {{.*#+}} ymm0 {%k1} {z} = ymm1 & ~(ymm0 | ymm2)
 ; X64-NEXT:    retq
 entry:
   %0 = bitcast <4 x i64> %__A to <8 x i32>
@@ -3587,7 +3587,7 @@ entry:
 define <2 x i64> @test_mm_ternarylogic_epi64(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> %__C) {
 ; CHECK-LABEL: test_mm_ternarylogic_epi64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpternlogq $4, %xmm2, %xmm1, %xmm0
+; CHECK-NEXT:    vpternlogq {{.*#+}} xmm0 = xmm1 & ~(xmm0 | xmm2)
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> %__C, i32 4)
@@ -3601,13 +3601,13 @@ define <2 x i64> @test_mm_mask_ternarylogic_epi64(<2 x i64> %__A, i8 zeroext %__
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogq $4, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vpternlogq {{.*#+}} xmm0 {%k1} = xmm1 & ~(xmm0 | xmm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_ternarylogic_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogq $4, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vpternlogq {{.*#+}} xmm0 {%k1} = xmm1 & ~(xmm0 | xmm2)
 ; X64-NEXT:    retq
 entry:
   %0 = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> %__C, i32 4)
@@ -3622,13 +3622,13 @@ define <2 x i64> @test_mm_maskz_ternarylogic_epi64(i8 zeroext %__U, <2 x i64> %_
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogq $4, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    vpternlogq {{.*#+}} xmm0 {%k1} {z} = xmm1 & ~(xmm0 | xmm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_ternarylogic_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogq $4, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    vpternlogq {{.*#+}} xmm0 {%k1} {z} = xmm1 & ~(xmm0 | xmm2)
 ; X64-NEXT:    retq
 entry:
   %0 = tail call <2 x i64> @llvm.x86.avx512.pternlog.q.128(<2 x i64> %__A, <2 x i64> %__B, <2 x i64> %__C, i32 4)
@@ -3641,7 +3641,7 @@ entry:
 define <4 x i64> @test_mm256_ternarylogic_epi64(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> %__C) {
 ; CHECK-LABEL: test_mm256_ternarylogic_epi64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vpternlogq $4, %ymm2, %ymm1, %ymm0
+; CHECK-NEXT:    vpternlogq {{.*#+}} ymm0 = ymm1 & ~(ymm0 | ymm2)
 ; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   %0 = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> %__C, i32 4)
@@ -3655,13 +3655,13 @@ define <4 x i64> @test_mm256_mask_ternarylogic_epi64(<4 x i64> %__A, i8 zeroext
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogq $4, %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT:    vpternlogq {{.*#+}} ymm0 {%k1} = ymm1 & ~(ymm0 | ymm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_mask_ternarylogic_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogq $4, %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT:    vpternlogq {{.*#+}} ymm0 {%k1} = ymm1 & ~(ymm0 | ymm2)
 ; X64-NEXT:    retq
 entry:
   %0 = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> %__C, i32 4)
@@ -3676,13 +3676,13 @@ define <4 x i64> @test_mm256_maskz_ternarylogic_epi64(i8 zeroext %__U, <4 x i64>
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    kmovw %eax, %k1
-; X86-NEXT:    vpternlogq $4, %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X86-NEXT:    vpternlogq {{.*#+}} ymm0 {%k1} {z} = ymm1 & ~(ymm0 | ymm2)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_maskz_ternarylogic_epi64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    kmovw %edi, %k1
-; X64-NEXT:    vpternlogq $4, %ymm2, %ymm1, %ymm0 {%k1} {z}
+; X64-NEXT:    vpternlogq {{.*#+}} ymm0 {%k1} {z} = ymm1 & ~(ymm0 | ymm2)
 ; X64-NEXT:    retq
 entry:
   %0 = tail call <4 x i64> @llvm.x86.avx512.pternlog.q.256(<4 x i64> %__A, <4 x i64> %__B, <4 x i64> %__C, i32 4)
