[llvm] r310790 - [X86][TBM] Add tests showing failure to fold RFLAGS result into TBM instructions.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 13 12:15:37 PDT 2017


Looks like there are two different issues preventing the folding. If you
use the data result from the instruction, we emit an And_flag/Or_flag/etc.
node, which we don't match to TBM. If you don't use the result, we emit a
regular 'and' plus a cmp against 0, but we aggressively pattern match the
cmp 0 + and into a test instruction.

I suppose we can add more patterns to catch these cases. Do we only care
about the Z flag when we create an And_flag/Or_flag/etc operation?
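
For reference, here's a minimal standalone IR sketch (not from the patch,
just an illustration using the blcfill shape) of the two cases described
above. In the first function the 'and' result is consumed as well as the
zero flag, so we'd need a TBM pattern on the flag-producing And_flag node;
in the second only the flag is consumed, so the and + icmp gets folded
into a test before any TBM pattern applies:

; Case 1: data result used in addition to the zero flag.
define i32 @blcfill_result_and_flags(i32 %a, i32 %b) nounwind {
  %t0 = add i32 %a, 1
  %t1 = and i32 %t0, %a
  %z = icmp eq i32 %t1, 0
  %r = select i1 %z, i32 %b, i32 %t1
  ret i32 %r
}

; Case 2: only the zero flag is used.
define i1 @blcfill_flags_only(i32 %a) nounwind {
  %t0 = add i32 %a, 1
  %t1 = and i32 %t0, %a
  %z = icmp eq i32 %t1, 0
  ret i1 %z
}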

~Craig

On Sun, Aug 13, 2017 at 5:16 AM, Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org> wrote:

> Author: rksimon
> Date: Sun Aug 13 05:16:00 2017
> New Revision: 310790
>
> URL: http://llvm.org/viewvc/llvm-project?rev=310790&view=rev
> Log:
> [X86][TBM] Add tests showing failure to fold RFLAGS result into TBM instructions.
>
> Also shows cases where we fail to select TBM instructions at all.
>
> Modified:
>     llvm/trunk/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
>     llvm/trunk/test/CodeGen/X86/tbm_patterns.ll
>
> Modified: llvm/trunk/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tbm-intrinsics-x86_64.ll?rev=310790&r1=310789&r2=310790&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/tbm-intrinsics-x86_64.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/tbm-intrinsics-x86_64.ll Sun Aug 13 05:16:00 2017
> @@ -24,6 +24,20 @@ entry:
>    ret i32 %0
>  }
>
> +define i32 @test_x86_tbm_bextri_u32_z(i32 %a, i32 %b) nounwind readonly {
> +; CHECK-LABEL: test_x86_tbm_bextri_u32_z:
> +; CHECK:       # BB#0: # %entry
> +; CHECK-NEXT:    bextr $2814, %edi, %eax # imm = 0xAFE
> +; CHECK-NEXT:    testl %eax, %eax
> +; CHECK-NEXT:    cmovel %esi, %eax
> +; CHECK-NEXT:    retq
> +entry:
> +  %0 = tail call i32 @llvm.x86.tbm.bextri.u32(i32 %a, i32 2814)
> +  %1 = icmp eq i32 %0, 0
> +  %2 = select i1 %1, i32 %b, i32 %0
> +  ret i32 %2
> +}
> +
>  define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind readnone {
>  ; CHECK-LABEL: test_x86_tbm_bextri_u64:
>  ; CHECK:       # BB#0: # %entry
> @@ -46,3 +60,17 @@ entry:
>    %0 = tail call i64 @llvm.x86.tbm.bextri.u64(i64 %tmp1, i64 2814)
>    ret i64 %0
>  }
> +
> +define i64 @test_x86_tbm_bextri_u64_z(i64 %a, i64 %b) nounwind readnone {
> +; CHECK-LABEL: test_x86_tbm_bextri_u64_z:
> +; CHECK:       # BB#0: # %entry
> +; CHECK-NEXT:    bextr $2814, %rdi, %rax # imm = 0xAFE
> +; CHECK-NEXT:    testq %rax, %rax
> +; CHECK-NEXT:    cmoveq %rsi, %rax
> +; CHECK-NEXT:    retq
> +entry:
> +  %0 = tail call i64 @llvm.x86.tbm.bextri.u64(i64 %a, i64 2814)
> +  %1 = icmp eq i64 %0, 0
> +  %2 = select i1 %1, i64 %b, i64 %0
> +  ret i64 %2
> +}
>
> Modified: llvm/trunk/test/CodeGen/X86/tbm_patterns.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tbm_patterns.ll?rev=310790&r1=310789&r2=310790&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/tbm_patterns.ll (original)
> +++ llvm/trunk/test/CodeGen/X86/tbm_patterns.ll Sun Aug 13 05:16:00 2017
> @@ -1,6 +1,8 @@
>  ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
>  ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+tbm < %s | FileCheck %s
>
> +; TODO - Patterns fail to fold with ZF flags, which prevents TBM instruction selection.
> +
>  define i32 @test_x86_tbm_bextri_u32(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_bextri_u32:
>  ; CHECK:       # BB#0:
> @@ -22,6 +24,21 @@ define i32 @test_x86_tbm_bextri_u32_m(i3
>    ret i32 %t2
>  }
>
> +define i32 @test_x86_tbm_bextri_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_bextri_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    shrl $4, %edi
> +; CHECK-NEXT:    andl $4095, %edi # imm = 0xFFF
> +; CHECK-NEXT:    cmovel %esi, %edi
> +; CHECK-NEXT:    movl %edi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = lshr i32 %a, 4
> +  %t1 = and i32 %t0, 4095
> +  %t2 = icmp eq i32 %t1, 0
> +  %t3 = select i1 %t2, i32 %b, i32 %t1
> +  ret i32 %t3
> +}
> +
>  define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_bextri_u64:
>  ; CHECK:       # BB#0:
> @@ -43,6 +60,21 @@ define i64 @test_x86_tbm_bextri_u64_m(i6
>    ret i64 %t2
>  }
>
> +define i64 @test_x86_tbm_bextri_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_bextri_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    shrl $4, %edi
> +; CHECK-NEXT:    andl $4095, %edi # imm = 0xFFF
> +; CHECK-NEXT:    cmoveq %rsi, %rdi
> +; CHECK-NEXT:    movq %rdi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = lshr i64 %a, 4
> +  %t1 = and i64 %t0, 4095
> +  %t2 = icmp eq i64 %t1, 0
> +  %t3 = select i1 %t2, i64 %b, i64 %t1
> +  ret i64 %t3
> +}
> +
>  define i32 @test_x86_tbm_blcfill_u32(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blcfill_u32:
>  ; CHECK:       # BB#0:
> @@ -53,6 +85,21 @@ define i32 @test_x86_tbm_blcfill_u32(i32
>    ret i32 %t1
>  }
>
> +define i32 @test_x86_tbm_blcfill_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blcfill_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
> +; CHECK-NEXT:    leal 1(%rdi), %eax
> +; CHECK-NEXT:    andl %edi, %eax
> +; CHECK-NEXT:    cmovel %esi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = add i32 %a, 1
> +  %t1 = and i32 %t0, %a
> +  %t2 = icmp eq i32 %t1, 0
> +  %t3 = select i1 %t2, i32 %b, i32 %t1
> +  ret i32 %t3
> +}
> +
>  define i64 @test_x86_tbm_blcfill_u64(i64 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blcfill_u64:
>  ; CHECK:       # BB#0:
> @@ -63,6 +110,20 @@ define i64 @test_x86_tbm_blcfill_u64(i64
>    ret i64 %t1
>  }
>
> +define i64 @test_x86_tbm_blcfill_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blcfill_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    leaq 1(%rdi), %rax
> +; CHECK-NEXT:    andq %rdi, %rax
> +; CHECK-NEXT:    cmoveq %rsi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = add i64 %a, 1
> +  %t1 = and i64 %t0, %a
> +  %t2 = icmp eq i64 %t1, 0
> +  %t3 = select i1 %t2, i64 %b, i64 %t1
> +  ret i64 %t3
> +}
> +
>  define i32 @test_x86_tbm_blci_u32(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blci_u32:
>  ; CHECK:       # BB#0:
> @@ -74,6 +135,23 @@ define i32 @test_x86_tbm_blci_u32(i32 %a
>    ret i32 %t2
>  }
>
> +define i32 @test_x86_tbm_blci_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blci_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
> +; CHECK-NEXT:    leal 1(%rdi), %eax
> +; CHECK-NEXT:    notl %eax
> +; CHECK-NEXT:    orl %edi, %eax
> +; CHECK-NEXT:    cmovel %esi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = add i32 1, %a
> +  %t1 = xor i32 %t0, -1
> +  %t2 = or i32 %t1, %a
> +  %t3 = icmp eq i32 %t2, 0
> +  %t4 = select i1 %t3, i32 %b, i32 %t2
> +  ret i32 %t4
> +}
> +
>  define i64 @test_x86_tbm_blci_u64(i64 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blci_u64:
>  ; CHECK:       # BB#0:
> @@ -85,6 +163,22 @@ define i64 @test_x86_tbm_blci_u64(i64 %a
>    ret i64 %t2
>  }
>
> +define i64 @test_x86_tbm_blci_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blci_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    leaq 1(%rdi), %rax
> +; CHECK-NEXT:    notq %rax
> +; CHECK-NEXT:    orq %rdi, %rax
> +; CHECK-NEXT:    cmoveq %rsi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = add i64 1, %a
> +  %t1 = xor i64 %t0, -1
> +  %t2 = or i64 %t1, %a
> +  %t3 = icmp eq i64 %t2, 0
> +  %t4 = select i1 %t3, i64 %b, i64 %t2
> +  ret i64 %t4
> +}
> +
>  define i32 @test_x86_tbm_blci_u32_b(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blci_u32_b:
>  ; CHECK:       # BB#0:
> @@ -116,6 +210,24 @@ define i32 @test_x86_tbm_blcic_u32(i32 %
>    ret i32 %t2
>  }
>
> +define i32 @test_x86_tbm_blcic_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blcic_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
> +; CHECK-NEXT:    leal 1(%rdi), %eax
> +; CHECK-NEXT:    movl %edi, %ecx
> +; CHECK-NEXT:    notl %ecx
> +; CHECK-NEXT:    andl %ecx, %eax
> +; CHECK-NEXT:    cmovel %esi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = xor i32 %a, -1
> +  %t1 = add i32 %a, 1
> +  %t2 = and i32 %t1, %t0
> +  %t3 = icmp eq i32 %t2, 0
> +  %t4 = select i1 %t3, i32 %b, i32 %t2
> +  ret i32 %t4
> +}
> +
>  define i64 @test_x86_tbm_blcic_u64(i64 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blcic_u64:
>  ; CHECK:       # BB#0:
> @@ -127,6 +239,22 @@ define i64 @test_x86_tbm_blcic_u64(i64 %
>    ret i64 %t2
>  }
>
> +define i64 @test_x86_tbm_blcic_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blcic_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    leaq 1(%rdi), %rax
> +; CHECK-NEXT:    notq %rdi
> +; CHECK-NEXT:    andq %rdi, %rax
> +; CHECK-NEXT:    cmoveq %rsi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = xor i64 %a, -1
> +  %t1 = add i64 %a, 1
> +  %t2 = and i64 %t1, %t0
> +  %t3 = icmp eq i64 %t2, 0
> +  %t4 = select i1 %t3, i64 %b, i64 %t2
> +  ret i64 %t4
> +}
> +
>  define i32 @test_x86_tbm_blcmsk_u32(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blcmsk_u32:
>  ; CHECK:       # BB#0:
> @@ -137,6 +265,21 @@ define i32 @test_x86_tbm_blcmsk_u32(i32
>    ret i32 %t1
>  }
>
> +define i32 @test_x86_tbm_blcmsk_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
> +; CHECK-NEXT:    leal 1(%rdi), %eax
> +; CHECK-NEXT:    xorl %edi, %eax
> +; CHECK-NEXT:    cmovel %esi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = add i32 %a, 1
> +  %t1 = xor i32 %t0, %a
> +  %t2 = icmp eq i32 %t1, 0
> +  %t3 = select i1 %t2, i32 %b, i32 %t1
> +  ret i32 %t3
> +}
> +
>  define i64 @test_x86_tbm_blcmsk_u64(i64 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blcmsk_u64:
>  ; CHECK:       # BB#0:
> @@ -147,6 +290,20 @@ define i64 @test_x86_tbm_blcmsk_u64(i64
>    ret i64 %t1
>  }
>
> +define i64 @test_x86_tbm_blcmsk_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blcmsk_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    leaq 1(%rdi), %rax
> +; CHECK-NEXT:    xorq %rdi, %rax
> +; CHECK-NEXT:    cmoveq %rsi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = add i64 %a, 1
> +  %t1 = xor i64 %t0, %a
> +  %t2 = icmp eq i64 %t1, 0
> +  %t3 = select i1 %t2, i64 %b, i64 %t1
> +  ret i64 %t3
> +}
> +
>  define i32 @test_x86_tbm_blcs_u32(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blcs_u32:
>  ; CHECK:       # BB#0:
> @@ -157,6 +314,21 @@ define i32 @test_x86_tbm_blcs_u32(i32 %a
>    ret i32 %t1
>  }
>
> +define i32 @test_x86_tbm_blcs_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blcs_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
> +; CHECK-NEXT:    leal 1(%rdi), %eax
> +; CHECK-NEXT:    orl %edi, %eax
> +; CHECK-NEXT:    cmovel %esi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = add i32 %a, 1
> +  %t1 = or i32 %t0, %a
> +  %t2 = icmp eq i32 %t1, 0
> +  %t3 = select i1 %t2, i32 %b, i32 %t1
> +  ret i32 %t3
> +}
> +
>  define i64 @test_x86_tbm_blcs_u64(i64 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blcs_u64:
>  ; CHECK:       # BB#0:
> @@ -167,6 +339,20 @@ define i64 @test_x86_tbm_blcs_u64(i64 %a
>    ret i64 %t1
>  }
>
> +define i64 @test_x86_tbm_blcs_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blcs_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    leaq 1(%rdi), %rax
> +; CHECK-NEXT:    orq %rdi, %rax
> +; CHECK-NEXT:    cmoveq %rsi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = add i64 %a, 1
> +  %t1 = or i64 %t0, %a
> +  %t2 = icmp eq i64 %t1, 0
> +  %t3 = select i1 %t2, i64 %b, i64 %t1
> +  ret i64 %t3
> +}
> +
>  define i32 @test_x86_tbm_blsfill_u32(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blsfill_u32:
>  ; CHECK:       # BB#0:
> @@ -177,6 +363,21 @@ define i32 @test_x86_tbm_blsfill_u32(i32
>    ret i32 %t1
>  }
>
> +define i32 @test_x86_tbm_blsfill_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blsfill_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
> +; CHECK-NEXT:    leal -1(%rdi), %eax
> +; CHECK-NEXT:    orl %edi, %eax
> +; CHECK-NEXT:    cmovel %esi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = add i32 %a, -1
> +  %t1 = or i32 %t0, %a
> +  %t2 = icmp eq i32 %t1, 0
> +  %t3 = select i1 %t2, i32 %b, i32 %t1
> +  ret i32 %t3
> +}
> +
>  define i64 @test_x86_tbm_blsfill_u64(i64 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blsfill_u64:
>  ; CHECK:       # BB#0:
> @@ -187,6 +388,20 @@ define i64 @test_x86_tbm_blsfill_u64(i64
>    ret i64 %t1
>  }
>
> +define i64 @test_x86_tbm_blsfill_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blsfill_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    leaq -1(%rdi), %rax
> +; CHECK-NEXT:    orq %rdi, %rax
> +; CHECK-NEXT:    cmoveq %rsi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = add i64 %a, -1
> +  %t1 = or i64 %t0, %a
> +  %t2 = icmp eq i64 %t1, 0
> +  %t3 = select i1 %t2, i64 %b, i64 %t1
> +  ret i64 %t3
> +}
> +
>  define i32 @test_x86_tbm_blsic_u32(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blsic_u32:
>  ; CHECK:       # BB#0:
> @@ -198,6 +413,24 @@ define i32 @test_x86_tbm_blsic_u32(i32 %
>    ret i32 %t2
>  }
>
> +define i32 @test_x86_tbm_blsic_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blsic_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    movl %edi, %eax
> +; CHECK-NEXT:    notl %eax
> +; CHECK-NEXT:    decl %edi
> +; CHECK-NEXT:    orl %eax, %edi
> +; CHECK-NEXT:    cmovel %esi, %edi
> +; CHECK-NEXT:    movl %edi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = xor i32 %a, -1
> +  %t1 = add i32 %a, -1
> +  %t2 = or i32 %t0, %t1
> +  %t3 = icmp eq i32 %t2, 0
> +  %t4 = select i1 %t3, i32 %b, i32 %t2
> +  ret i32 %t4
> +}
> +
>  define i64 @test_x86_tbm_blsic_u64(i64 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_blsic_u64:
>  ; CHECK:       # BB#0:
> @@ -209,6 +442,24 @@ define i64 @test_x86_tbm_blsic_u64(i64 %
>    ret i64 %t2
>  }
>
> +define i64 @test_x86_tbm_blsic_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_blsic_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    movq %rdi, %rax
> +; CHECK-NEXT:    notq %rax
> +; CHECK-NEXT:    decq %rdi
> +; CHECK-NEXT:    orq %rax, %rdi
> +; CHECK-NEXT:    cmoveq %rsi, %rdi
> +; CHECK-NEXT:    movq %rdi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = xor i64 %a, -1
> +  %t1 = add i64 %a, -1
> +  %t2 = or i64 %t0, %t1
> +  %t3 = icmp eq i64 %t2, 0
> +  %t4 = select i1 %t3, i64 %b, i64 %t2
> +  ret i64 %t4
> +}
> +
>  define i32 @test_x86_tbm_t1mskc_u32(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_t1mskc_u32:
>  ; CHECK:       # BB#0:
> @@ -220,8 +471,26 @@ define i32 @test_x86_tbm_t1mskc_u32(i32
>    ret i32 %t2
>  }
>
> -define i64 @Ttest_x86_tbm_t1mskc_u64(i64 %a) nounwind {
> -; CHECK-LABEL: Ttest_x86_tbm_t1mskc_u64:
> +define i32 @test_x86_tbm_t1mskc_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_t1mskc_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    movl %edi, %eax
> +; CHECK-NEXT:    notl %eax
> +; CHECK-NEXT:    incl %edi
> +; CHECK-NEXT:    orl %eax, %edi
> +; CHECK-NEXT:    cmovel %esi, %edi
> +; CHECK-NEXT:    movl %edi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = xor i32 %a, -1
> +  %t1 = add i32 %a, 1
> +  %t2 = or i32 %t0, %t1
> +  %t3 = icmp eq i32 %t2, 0
> +  %t4 = select i1 %t3, i32 %b, i32 %t2
> +  ret i32 %t4
> +}
> +
> +define i64 @test_x86_tbm_t1mskc_u64(i64 %a) nounwind {
> +; CHECK-LABEL: test_x86_tbm_t1mskc_u64:
>  ; CHECK:       # BB#0:
>  ; CHECK-NEXT:    t1mskc %rdi, %rax
>  ; CHECK-NEXT:    retq
> @@ -231,6 +500,24 @@ define i64 @Ttest_x86_tbm_t1mskc_u64(i64
>    ret i64 %t2
>  }
>
> +define i64 @test_x86_tbm_t1mskc_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_t1mskc_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    movq %rdi, %rax
> +; CHECK-NEXT:    notq %rax
> +; CHECK-NEXT:    incq %rdi
> +; CHECK-NEXT:    orq %rax, %rdi
> +; CHECK-NEXT:    cmoveq %rsi, %rdi
> +; CHECK-NEXT:    movq %rdi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = xor i64 %a, -1
> +  %t1 = add i64 %a, 1
> +  %t2 = or i64 %t0, %t1
> +  %t3 = icmp eq i64 %t2, 0
> +  %t4 = select i1 %t3, i64 %b, i64 %t2
> +  ret i64 %t4
> +}
> +
>  define i32 @test_x86_tbm_tzmsk_u32(i32 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_tzmsk_u32:
>  ; CHECK:       # BB#0:
> @@ -242,6 +529,24 @@ define i32 @test_x86_tbm_tzmsk_u32(i32 %
>    ret i32 %t2
>  }
>
> +define i32 @test_x86_tbm_tzmsk_u32_z(i32 %a, i32 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_tzmsk_u32_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    movl %edi, %eax
> +; CHECK-NEXT:    notl %eax
> +; CHECK-NEXT:    decl %edi
> +; CHECK-NEXT:    andl %eax, %edi
> +; CHECK-NEXT:    cmovel %esi, %edi
> +; CHECK-NEXT:    movl %edi, %eax
> +; CHECK-NEXT:    retq
> +  %t0 = xor i32 %a, -1
> +  %t1 = add i32 %a, -1
> +  %t2 = and i32 %t0, %t1
> +  %t3 = icmp eq i32 %t2, 0
> +  %t4 = select i1 %t3, i32 %b, i32 %t2
> +  ret i32 %t4
> +}
> +
>  define i64 @test_x86_tbm_tzmsk_u64(i64 %a) nounwind {
>  ; CHECK-LABEL: test_x86_tbm_tzmsk_u64:
>  ; CHECK:       # BB#0:
> @@ -253,6 +558,24 @@ define i64 @test_x86_tbm_tzmsk_u64(i64 %
>    ret i64 %t2
>  }
>
> +define i64 @test_x86_tbm_tzmsk_u64_z(i64 %a, i64 %b) nounwind {
> +; CHECK-LABEL: test_x86_tbm_tzmsk_u64_z:
> +; CHECK:       # BB#0:
> +; CHECK-NEXT:    movq %rdi, %rax
> +; CHECK-NEXT:    notq %rax
> +; CHECK-NEXT:    decq %rdi
> +; CHECK-NEXT:    andq %rax, %rdi
> +; CHECK-NEXT:    cmoveq %rsi, %rdi
> +; CHECK-NEXT:    movq %rdi, %rax
> +; CHECK-NEXT:    retq
> +  %t0 = xor i64 %a, -1
> +  %t1 = add i64 %a, -1
> +  %t2 = and i64 %t0, %t1
> +  %t3 = icmp eq i64 %t2, 0
> +  %t4 = select i1 %t3, i64 %b, i64 %t2
> +  ret i64 %t4
> +}
> +
>  define i64 @test_and_large_constant_mask(i64 %x) {
>  ; CHECK-LABEL: test_and_large_constant_mask:
>  ; CHECK:       # BB#0: # %entry
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at lists.llvm.org
> http://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits
>