[llvm] afed50a - [X86] Add test cases for PR48768 and D94856. NFC

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Tue Mar 30 23:14:37 PDT 2021


Author: Craig Topper
Date: 2021-03-30T23:11:28-07:00
New Revision: afed50a14b34eb619624aed5c85f4f610f360650

URL: https://github.com/llvm/llvm-project/commit/afed50a14b34eb619624aed5c85f4f610f360650
DIFF: https://github.com/llvm/llvm-project/commit/afed50a14b34eb619624aed5c85f4f610f360650.diff

LOG: [X86] Add test cases for PR48768 and D94856. NFC

This covers the BMI and TBM instructions. More tests will be
needed for other instructions.
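
For context, every new test follows the same shape: a BMI/TBM bit
manipulation, a signed "<= 0" compare of its result, and a select
between two other values. A minimal sketch of the pattern, mirroring
the committed blsr32_sle test below (the function name here is
illustrative only):

  define i32 @blsr32_sle_sketch(i32 %a, i32 %b, i32 %c) nounwind {
    %t0 = sub i32 %a, 1                  ; BLSR: reset lowest set bit
    %t1 = and i32 %t0, %a
    %t2 = icmp sle i32 %t1, 0            ; signed compare against zero
    %t3 = select i1 %t2, i32 %b, i32 %c  ; pick %b when result <= 0
    ret i32 %t3
  }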

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/bmi.ll
    llvm/test/CodeGen/X86/tbm_patterns.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/bmi.ll b/llvm/test/CodeGen/X86/bmi.ll
index cbb6ecfd7db0..641b03ea92f6 100644
--- a/llvm/test/CodeGen/X86/bmi.ll
+++ b/llvm/test/CodeGen/X86/bmi.ll
@@ -539,6 +539,31 @@ define i32 @blsi32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t3
 }
 
+define i32 @blsi32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; X86-LABEL: blsi32_sle:
+; X86:       # %bb.0:
+; X86-NEXT:    blsil {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovlel %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsi32_sle:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    blsil %edi, %ecx
+; X64-NEXT:    testl %ecx, %ecx
+; X64-NEXT:    cmovgl %edx, %eax
+; X64-NEXT:    retq
+  %t0 = sub i32 0, %a
+  %t1 = and i32 %t0, %a
+  %t2 = icmp sle i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @blsi64(i64 %x)   {
 ; X86-LABEL: blsi64:
 ; X86:       # %bb.0:
@@ -580,11 +605,11 @@ define i64 @blsi64_z(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    andl %ecx, %eax
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    orl %edx, %ecx
-; X86-NEXT:    jne .LBB27_2
+; X86-NEXT:    jne .LBB28_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:  .LBB27_2:
+; X86-NEXT:  .LBB28_2:
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
@@ -634,6 +659,42 @@ define i64 @blsi64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t3
 }
 
+define i64 @blsi64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; X86-LABEL: blsi64_sle:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    negl %esi
+; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    andl %ecx, %edx
+; X86-NEXT:    andl %eax, %esi
+; X86-NEXT:    cmpl $1, %esi
+; X86-NEXT:    sbbl $0, %edx
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovll %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    movl 4(%ecx), %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsi64_sle:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rax
+; X64-NEXT:    blsiq %rdi, %rcx
+; X64-NEXT:    testq %rcx, %rcx
+; X64-NEXT:    cmovgq %rdx, %rax
+; X64-NEXT:    retq
+  %t0 = sub i64 0, %a
+  %t1 = and i64 %t0, %a
+  %t2 = icmp sle i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 define i32 @blsmsk32(i32 %x)   {
 ; X86-LABEL: blsmsk32:
 ; X86:       # %bb.0:
@@ -670,10 +731,10 @@ define i32 @blsmsk32_z(i32 %a, i32 %b) nounwind {
 ; X86-LABEL: blsmsk32_z:
 ; X86:       # %bb.0:
 ; X86-NEXT:    blsmskl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    jne .LBB31_2
+; X86-NEXT:    jne .LBB33_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:  .LBB31_2:
+; X86-NEXT:  .LBB33_2:
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: blsmsk32_z:
@@ -711,6 +772,31 @@ define i32 @blsmsk32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t3
 }
 
+define i32 @blsmsk32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; X86-LABEL: blsmsk32_sle:
+; X86:       # %bb.0:
+; X86-NEXT:    blsmskl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovlel %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsmsk32_sle:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    blsmskl %edi, %ecx
+; X64-NEXT:    testl %ecx, %ecx
+; X64-NEXT:    cmovgl %edx, %eax
+; X64-NEXT:    retq
+  %t0 = sub i32 %a, 1
+  %t1 = xor i32 %t0, %a
+  %t2 = icmp sle i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @blsmsk64(i64 %x)   {
 ; X86-LABEL: blsmsk64:
 ; X86:       # %bb.0:
@@ -752,11 +838,11 @@ define i64 @blsmsk64_z(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    xorl %esi, %edx
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    orl %edx, %ecx
-; X86-NEXT:    jne .LBB34_2
+; X86-NEXT:    jne .LBB37_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:  .LBB34_2:
+; X86-NEXT:  .LBB37_2:
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
@@ -806,6 +892,42 @@ define i64 @blsmsk64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t3
 }
 
+define i64 @blsmsk64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; X86-LABEL: blsmsk64_sle:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    addl $-1, %edx
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    adcl $-1, %esi
+; X86-NEXT:    xorl %ecx, %esi
+; X86-NEXT:    xorl %eax, %edx
+; X86-NEXT:    cmpl $1, %edx
+; X86-NEXT:    sbbl $0, %esi
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovll %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    movl 4(%ecx), %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsmsk64_sle:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rax
+; X64-NEXT:    blsmskq %rdi, %rcx
+; X64-NEXT:    testq %rcx, %rcx
+; X64-NEXT:    cmovgq %rdx, %rax
+; X64-NEXT:    retq
+  %t0 = sub i64 %a, 1
+  %t1 = xor i64 %t0, %a
+  %t2 = icmp sle i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 define i32 @blsr32(i32 %x)   {
 ; X86-LABEL: blsr32:
 ; X86:       # %bb.0:
@@ -842,10 +964,10 @@ define i32 @blsr32_z(i32 %a, i32 %b) nounwind {
 ; X86-LABEL: blsr32_z:
 ; X86:       # %bb.0:
 ; X86-NEXT:    blsrl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    jne .LBB38_2
+; X86-NEXT:    jne .LBB42_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:  .LBB38_2:
+; X86-NEXT:  .LBB42_2:
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: blsr32_z:
@@ -883,6 +1005,31 @@ define i32 @blsr32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t3
 }
 
+define i32 @blsr32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; X86-LABEL: blsr32_sle:
+; X86:       # %bb.0:
+; X86-NEXT:    blsrl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovlel %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsr32_sle:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    blsrl %edi, %ecx
+; X64-NEXT:    testl %ecx, %ecx
+; X64-NEXT:    cmovgl %edx, %eax
+; X64-NEXT:    retq
+  %t0 = sub i32 %a, 1
+  %t1 = and i32 %t0, %a
+  %t2 = icmp sle i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @blsr64(i64 %x)   {
 ; X86-LABEL: blsr64:
 ; X86:       # %bb.0:
@@ -924,11 +1071,11 @@ define i64 @blsr64_z(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    andl %esi, %edx
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    orl %edx, %ecx
-; X86-NEXT:    jne .LBB41_2
+; X86-NEXT:    jne .LBB46_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:  .LBB41_2:
+; X86-NEXT:  .LBB46_2:
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
@@ -978,6 +1125,42 @@ define i64 @blsr64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t3
 }
 
+define i64 @blsr64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; X86-LABEL: blsr64_sle:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    addl $-1, %edx
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    adcl $-1, %esi
+; X86-NEXT:    andl %ecx, %esi
+; X86-NEXT:    andl %eax, %edx
+; X86-NEXT:    cmpl $1, %edx
+; X86-NEXT:    sbbl $0, %esi
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cmovll %eax, %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    movl 4(%ecx), %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: blsr64_sle:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rax
+; X64-NEXT:    blsrq %rdi, %rcx
+; X64-NEXT:    testq %rcx, %rcx
+; X64-NEXT:    cmovgq %rdx, %rax
+; X64-NEXT:    retq
+  %t0 = sub i64 %a, 1
+  %t1 = and i64 %t0, %a
+  %t2 = icmp sle i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 ; PR35792 - https://bugs.llvm.org/show_bug.cgi?id=35792
 
 define i64 @blsr_disguised_constant(i64 %x) {
@@ -1021,27 +1204,26 @@ define i64 @blsr_disguised_shrunk_add(i64 %x) {
   ret i64 %c
 }
 
-; FIXME: We should not be using the S flag from BEXTR.
 define void @pr40060(i32, i32) {
 ; X86-LABEL: pr40060:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    bextrl %eax, {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    testl %eax, %eax
-; X86-NEXT:    js .LBB45_1
+; X86-NEXT:    js .LBB51_1
 ; X86-NEXT:  # %bb.2:
 ; X86-NEXT:    jmp bar # TAILCALL
-; X86-NEXT:  .LBB45_1:
+; X86-NEXT:  .LBB51_1:
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: pr40060:
 ; X64:       # %bb.0:
 ; X64-NEXT:    bextrl %esi, %edi, %eax
 ; X64-NEXT:    testl %eax, %eax
-; X64-NEXT:    js .LBB45_1
+; X64-NEXT:    js .LBB51_1
 ; X64-NEXT:  # %bb.2:
 ; X64-NEXT:    jmp bar # TAILCALL
-; X64-NEXT:  .LBB45_1:
+; X64-NEXT:  .LBB51_1:
 ; X64-NEXT:    retq
   %3 = tail call i32 @llvm.x86.bmi.bextr.32(i32 %0, i32 %1)
   %4 = icmp sgt i32 %3, -1
@@ -1060,10 +1242,10 @@ define i32 @blsr32_branch(i32 %x) {
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
 ; X86-NEXT:    blsrl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    jne .LBB46_2
+; X86-NEXT:    jne .LBB52_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    calll bar
-; X86-NEXT:  .LBB46_2:
+; X86-NEXT:  .LBB52_2:
 ; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
@@ -1075,10 +1257,10 @@ define i32 @blsr32_branch(i32 %x) {
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    .cfi_offset %rbx, -16
 ; X64-NEXT:    blsrl %edi, %ebx
-; X64-NEXT:    jne .LBB46_2
+; X64-NEXT:    jne .LBB52_2
 ; X64-NEXT:  # %bb.1:
 ; X64-NEXT:    callq bar
-; X64-NEXT:  .LBB46_2:
+; X64-NEXT:  .LBB52_2:
 ; X64-NEXT:    movl %ebx, %eax
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    .cfi_def_cfa_offset 8
@@ -1112,10 +1294,10 @@ define i64 @blsr64_branch(i64 %x) {
 ; X86-NEXT:    andl %ecx, %edi
 ; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    orl %edi, %eax
-; X86-NEXT:    jne .LBB47_2
+; X86-NEXT:    jne .LBB53_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    calll bar
-; X86-NEXT:  .LBB47_2:
+; X86-NEXT:  .LBB53_2:
 ; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    movl %edi, %edx
 ; X86-NEXT:    popl %esi
@@ -1130,10 +1312,10 @@ define i64 @blsr64_branch(i64 %x) {
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    .cfi_offset %rbx, -16
 ; X64-NEXT:    blsrq %rdi, %rbx
-; X64-NEXT:    jne .LBB47_2
+; X64-NEXT:    jne .LBB53_2
 ; X64-NEXT:  # %bb.1:
 ; X64-NEXT:    callq bar
-; X64-NEXT:  .LBB47_2:
+; X64-NEXT:  .LBB53_2:
 ; X64-NEXT:    movq %rbx, %rax
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    .cfi_def_cfa_offset 8
@@ -1155,10 +1337,10 @@ define i32 @blsi32_branch(i32 %x) {
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    .cfi_offset %esi, -8
 ; X86-NEXT:    blsil {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    jne .LBB48_2
+; X86-NEXT:    jne .LBB54_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    calll bar
-; X86-NEXT:  .LBB48_2:
+; X86-NEXT:  .LBB54_2:
 ; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
@@ -1170,10 +1352,10 @@ define i32 @blsi32_branch(i32 %x) {
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    .cfi_offset %rbx, -16
 ; X64-NEXT:    blsil %edi, %ebx
-; X64-NEXT:    jne .LBB48_2
+; X64-NEXT:    jne .LBB54_2
 ; X64-NEXT:  # %bb.1:
 ; X64-NEXT:    callq bar
-; X64-NEXT:  .LBB48_2:
+; X64-NEXT:  .LBB54_2:
 ; X64-NEXT:    movl %ebx, %eax
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    .cfi_def_cfa_offset 8
@@ -1207,10 +1389,10 @@ define i64 @blsi64_branch(i64 %x) {
 ; X86-NEXT:    andl %eax, %edi
 ; X86-NEXT:    movl %edi, %eax
 ; X86-NEXT:    orl %esi, %eax
-; X86-NEXT:    jne .LBB49_2
+; X86-NEXT:    jne .LBB55_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    calll bar
-; X86-NEXT:  .LBB49_2:
+; X86-NEXT:  .LBB55_2:
 ; X86-NEXT:    movl %edi, %eax
 ; X86-NEXT:    movl %esi, %edx
 ; X86-NEXT:    popl %esi
@@ -1225,10 +1407,10 @@ define i64 @blsi64_branch(i64 %x) {
 ; X64-NEXT:    .cfi_def_cfa_offset 16
 ; X64-NEXT:    .cfi_offset %rbx, -16
 ; X64-NEXT:    blsiq %rdi, %rbx
-; X64-NEXT:    jne .LBB49_2
+; X64-NEXT:    jne .LBB55_2
 ; X64-NEXT:  # %bb.1:
 ; X64-NEXT:    callq bar
-; X64-NEXT:  .LBB49_2:
+; X64-NEXT:  .LBB55_2:
 ; X64-NEXT:    movq %rbx, %rax
 ; X64-NEXT:    popq %rbx
 ; X64-NEXT:    .cfi_def_cfa_offset 8
@@ -1249,19 +1431,19 @@ define void @pr42118_i32(i32 %x) {
 ; X86-LABEL: pr42118_i32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    blsrl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    jne .LBB50_1
+; X86-NEXT:    jne .LBB56_1
 ; X86-NEXT:  # %bb.2:
 ; X86-NEXT:    jmp bar # TAILCALL
-; X86-NEXT:  .LBB50_1:
+; X86-NEXT:  .LBB56_1:
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: pr42118_i32:
 ; X64:       # %bb.0:
 ; X64-NEXT:    blsrl %edi, %eax
-; X64-NEXT:    jne .LBB50_1
+; X64-NEXT:    jne .LBB56_1
 ; X64-NEXT:  # %bb.2:
 ; X64-NEXT:    jmp bar # TAILCALL
-; X64-NEXT:  .LBB50_1:
+; X64-NEXT:  .LBB56_1:
 ; X64-NEXT:    retq
   %tmp = sub i32 0, %x
   %tmp1 = and i32 %tmp, %x
@@ -1289,12 +1471,12 @@ define void @pr42118_i64(i64 %x) {
 ; X86-NEXT:    andl %eax, %edx
 ; X86-NEXT:    andl %ecx, %esi
 ; X86-NEXT:    orl %edx, %esi
-; X86-NEXT:    jne .LBB51_1
+; X86-NEXT:    jne .LBB57_1
 ; X86-NEXT:  # %bb.2:
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    jmp bar # TAILCALL
-; X86-NEXT:  .LBB51_1:
+; X86-NEXT:  .LBB57_1:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
@@ -1303,10 +1485,10 @@ define void @pr42118_i64(i64 %x) {
 ; X64-LABEL: pr42118_i64:
 ; X64:       # %bb.0:
 ; X64-NEXT:    blsrq %rdi, %rax
-; X64-NEXT:    jne .LBB51_1
+; X64-NEXT:    jne .LBB57_1
 ; X64-NEXT:  # %bb.2:
 ; X64-NEXT:    jmp bar # TAILCALL
-; X64-NEXT:  .LBB51_1:
+; X64-NEXT:  .LBB57_1:
 ; X64-NEXT:    retq
   %tmp = sub i64 0, %x
   %tmp1 = and i64 %tmp, %x
@@ -1324,11 +1506,11 @@ define i32 @blsi_cflag_32(i32 %x, i32 %y) nounwind {
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    testl %eax, %eax
-; X86-NEXT:    jne .LBB52_1
+; X86-NEXT:    jne .LBB58_1
 ; X86-NEXT:  # %bb.2:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB52_1:
+; X86-NEXT:  .LBB58_1:
 ; X86-NEXT:    blsil %eax, %eax
 ; X86-NEXT:    retl
 ;
@@ -1357,15 +1539,15 @@ define i64 @blsi_cflag_64(i64 %x, i64 %y) nounwind {
 ; X86-NEXT:    sbbl %esi, %edx
 ; X86-NEXT:    movl %ecx, %edi
 ; X86-NEXT:    orl %esi, %edi
-; X86-NEXT:    jne .LBB53_1
+; X86-NEXT:    jne .LBB59_1
 ; X86-NEXT:  # %bb.2:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    jmp .LBB53_3
-; X86-NEXT:  .LBB53_1:
+; X86-NEXT:    jmp .LBB59_3
+; X86-NEXT:  .LBB59_1:
 ; X86-NEXT:    andl %esi, %edx
 ; X86-NEXT:    andl %ecx, %eax
-; X86-NEXT:  .LBB53_3:
+; X86-NEXT:  .LBB59_3:
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    retl

diff --git a/llvm/test/CodeGen/X86/tbm_patterns.ll b/llvm/test/CodeGen/X86/tbm_patterns.ll
index a21c092b986b..5f5306a722b1 100644
--- a/llvm/test/CodeGen/X86/tbm_patterns.ll
+++ b/llvm/test/CodeGen/X86/tbm_patterns.ll
@@ -61,6 +61,21 @@ define i32 @test_x86_tbm_bextri_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t3
 }
 
+define i32 @test_x86_tbm_bextri_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    bextrl $3076, %edi, %ecx # imm = 0xC04
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = lshr i32 %a, 4
+  %t1 = and i32 %t0, 4095
+  %t2 = icmp sle i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @test_x86_tbm_bextri_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_bextri_u64:
 ; CHECK:       # %bb.0:
@@ -121,6 +136,21 @@ define i64 @test_x86_tbm_bextri_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t3
 }
 
+define i64 @test_x86_tbm_bextri_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_bextri_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    bextrl $3076, %edi, %ecx # imm = 0xC04
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = lshr i64 %a, 4
+  %t1 = and i64 %t0, 4095
+  %t2 = icmp sle i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 define i32 @test_x86_tbm_blcfill_u32(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blcfill_u32:
 ; CHECK:       # %bb.0:
@@ -158,6 +188,21 @@ define i32 @test_x86_tbm_blcfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t3
 }
 
+define i32 @test_x86_tbm_blcfill_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcfill_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    blcfilll %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 %a, 1
+  %t1 = and i32 %t0, %a
+  %t2 = icmp sle i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @test_x86_tbm_blcfill_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blcfill_u64:
 ; CHECK:       # %bb.0:
@@ -195,6 +240,21 @@ define i64 @test_x86_tbm_blcfill_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t3
 }
 
+define i64 @test_x86_tbm_blcfill_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcfill_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    blcfillq %rdi, %rcx
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 %a, 1
+  %t1 = and i64 %t0, %a
+  %t2 = icmp sle i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 define i32 @test_x86_tbm_blci_u32(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blci_u32:
 ; CHECK:       # %bb.0:
@@ -235,6 +295,22 @@ define i32 @test_x86_tbm_blci_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t4
 }
 
+define i32 @test_x86_tbm_blci_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    blcil %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 1, %a
+  %t1 = xor i32 %t0, -1
+  %t2 = or i32 %t1, %a
+  %t3 = icmp sle i32 %t2, 0
+  %t4 = select i1 %t3, i32 %b, i32 %c
+  ret i32 %t4
+}
+
 define i64 @test_x86_tbm_blci_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blci_u64:
 ; CHECK:       # %bb.0:
@@ -275,6 +351,22 @@ define i64 @test_x86_tbm_blci_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t4
 }
 
+define i64 @test_x86_tbm_blci_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blci_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    blciq %rdi, %rcx
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 1, %a
+  %t1 = xor i64 %t0, -1
+  %t2 = or i64 %t1, %a
+  %t3 = icmp sle i64 %t2, 0
+  %t4 = select i1 %t3, i64 %b, i64 %c
+  ret i64 %t4
+}
+
 define i32 @test_x86_tbm_blci_u32_b(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blci_u32_b:
 ; CHECK:       # %bb.0:
@@ -335,6 +427,22 @@ define i32 @test_x86_tbm_blcic_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t4
 }
 
+define i32 @test_x86_tbm_blcic_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcic_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    blcicl %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = xor i32 %a, -1
+  %t1 = add i32 %a, 1
+  %t2 = and i32 %t1, %t0
+  %t3 = icmp sle i32 %t2, 0
+  %t4 = select i1 %t3, i32 %b, i32 %c
+  ret i32 %t4
+}
+
 define i64 @test_x86_tbm_blcic_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blcic_u64:
 ; CHECK:       # %bb.0:
@@ -375,6 +483,22 @@ define i64 @test_x86_tbm_blcic_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t4
 }
 
+define i64 @test_x86_tbm_blcic_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcic_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    blcicq %rdi, %rcx
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = xor i64 %a, -1
+  %t1 = add i64 %a, 1
+  %t2 = and i64 %t1, %t0
+  %t3 = icmp sle i64 %t2, 0
+  %t4 = select i1 %t3, i64 %b, i64 %c
+  ret i64 %t4
+}
+
 define i32 @test_x86_tbm_blcmsk_u32(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blcmsk_u32:
 ; CHECK:       # %bb.0:
@@ -412,6 +536,21 @@ define i32 @test_x86_tbm_blcmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t3
 }
 
+define i32 @test_x86_tbm_blcmsk_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcmsk_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    blcmskl %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 %a, 1
+  %t1 = xor i32 %t0, %a
+  %t2 = icmp sle i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @test_x86_tbm_blcmsk_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blcmsk_u64:
 ; CHECK:       # %bb.0:
@@ -449,6 +588,21 @@ define i64 @test_x86_tbm_blcmsk_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t3
 }
 
+define i64 @test_x86_tbm_blcmsk_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcmsk_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    blcmskq %rdi, %rcx
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 %a, 1
+  %t1 = xor i64 %t0, %a
+  %t2 = icmp sle i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 define i32 @test_x86_tbm_blcs_u32(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blcs_u32:
 ; CHECK:       # %bb.0:
@@ -486,6 +640,21 @@ define i32 @test_x86_tbm_blcs_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t3
 }
 
+define i32 @test_x86_tbm_blcs_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcs_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    blcsl %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 %a, 1
+  %t1 = or i32 %t0, %a
+  %t2 = icmp sle i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @test_x86_tbm_blcs_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blcs_u64:
 ; CHECK:       # %bb.0:
@@ -523,6 +692,21 @@ define i64 @test_x86_tbm_blcs_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t3
 }
 
+define i64 @test_x86_tbm_blcs_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blcs_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    blcsq %rdi, %rcx
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 %a, 1
+  %t1 = or i64 %t0, %a
+  %t2 = icmp sle i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 define i32 @test_x86_tbm_blsfill_u32(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blsfill_u32:
 ; CHECK:       # %bb.0:
@@ -560,6 +744,21 @@ define i32 @test_x86_tbm_blsfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t3
 }
 
+define i32 @test_x86_tbm_blsfill_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsfill_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    blsfilll %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = add i32 %a, -1
+  %t1 = or i32 %t0, %a
+  %t2 = icmp sle i32 %t1, 0
+  %t3 = select i1 %t2, i32 %b, i32 %c
+  ret i32 %t3
+}
+
 define i64 @test_x86_tbm_blsfill_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blsfill_u64:
 ; CHECK:       # %bb.0:
@@ -597,6 +796,21 @@ define i64 @test_x86_tbm_blsfill_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t3
 }
 
+define i64 @test_x86_tbm_blsfill_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsfill_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    blsfillq %rdi, %rcx
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = add i64 %a, -1
+  %t1 = or i64 %t0, %a
+  %t2 = icmp sle i64 %t1, 0
+  %t3 = select i1 %t2, i64 %b, i64 %c
+  ret i64 %t3
+}
+
 define i32 @test_x86_tbm_blsic_u32(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blsic_u32:
 ; CHECK:       # %bb.0:
@@ -637,6 +851,22 @@ define i32 @test_x86_tbm_blsic_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t4
 }
 
+define i32 @test_x86_tbm_blsic_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsic_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    blsicl %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = xor i32 %a, -1
+  %t1 = add i32 %a, -1
+  %t2 = or i32 %t0, %t1
+  %t3 = icmp sle i32 %t2, 0
+  %t4 = select i1 %t3, i32 %b, i32 %c
+  ret i32 %t4
+}
+
 define i64 @test_x86_tbm_blsic_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_blsic_u64:
 ; CHECK:       # %bb.0:
@@ -677,6 +907,22 @@ define i64 @test_x86_tbm_blsic_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t4
 }
 
+define i64 @test_x86_tbm_blsic_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_blsic_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    blsicq %rdi, %rcx
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = xor i64 %a, -1
+  %t1 = add i64 %a, -1
+  %t2 = or i64 %t0, %t1
+  %t3 = icmp sle i64 %t2, 0
+  %t4 = select i1 %t3, i64 %b, i64 %c
+  ret i64 %t4
+}
+
 define i32 @test_x86_tbm_t1mskc_u32(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_t1mskc_u32:
 ; CHECK:       # %bb.0:
@@ -717,6 +963,22 @@ define i32 @test_x86_tbm_t1mskc_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t4
 }
 
+define i32 @test_x86_tbm_t1mskc_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_t1mskc_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    t1mskcl %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = xor i32 %a, -1
+  %t1 = add i32 %a, 1
+  %t2 = or i32 %t0, %t1
+  %t3 = icmp sle i32 %t2, 0
+  %t4 = select i1 %t3, i32 %b, i32 %c
+  ret i32 %t4
+}
+
 define i64 @test_x86_tbm_t1mskc_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_t1mskc_u64:
 ; CHECK:       # %bb.0:
@@ -757,6 +1019,22 @@ define i64 @test_x86_tbm_t1mskc_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t4
 }
 
+define i64 @test_x86_tbm_t1mskc_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_t1mskc_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    t1mskcq %rdi, %rcx
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = xor i64 %a, -1
+  %t1 = add i64 %a, 1
+  %t2 = or i64 %t0, %t1
+  %t3 = icmp sle i64 %t2, 0
+  %t4 = select i1 %t3, i64 %b, i64 %c
+  ret i64 %t4
+}
+
 define i32 @test_x86_tbm_tzmsk_u32(i32 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_tzmsk_u32:
 ; CHECK:       # %bb.0:
@@ -797,6 +1075,22 @@ define i32 @test_x86_tbm_tzmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
   ret i32 %t4
 }
 
+define i32 @test_x86_tbm_tzmsk_u32_sle(i32 %a, i32 %b, i32 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_tzmsk_u32_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    tzmskl %edi, %ecx
+; CHECK-NEXT:    testl %ecx, %ecx
+; CHECK-NEXT:    cmovgl %edx, %eax
+; CHECK-NEXT:    retq
+  %t0 = xor i32 %a, -1
+  %t1 = add i32 %a, -1
+  %t2 = and i32 %t0, %t1
+  %t3 = icmp sle i32 %t2, 0
+  %t4 = select i1 %t3, i32 %b, i32 %c
+  ret i32 %t4
+}
+
 define i64 @test_x86_tbm_tzmsk_u64(i64 %a) nounwind {
 ; CHECK-LABEL: test_x86_tbm_tzmsk_u64:
 ; CHECK:       # %bb.0:
@@ -837,6 +1131,22 @@ define i64 @test_x86_tbm_tzmsk_u64_z2(i64 %a, i64 %b, i64 %c) nounwind {
   ret i64 %t4
 }
 
+define i64 @test_x86_tbm_tzmsk_u64_sle(i64 %a, i64 %b, i64 %c) nounwind {
+; CHECK-LABEL: test_x86_tbm_tzmsk_u64_sle:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    tzmskq %rdi, %rcx
+; CHECK-NEXT:    testq %rcx, %rcx
+; CHECK-NEXT:    cmovgq %rdx, %rax
+; CHECK-NEXT:    retq
+  %t0 = xor i64 %a, -1
+  %t1 = add i64 %a, -1
+  %t2 = and i64 %t0, %t1
+  %t3 = icmp sle i64 %t2, 0
+  %t4 = select i1 %t3, i64 %b, i64 %c
+  ret i64 %t4
+}
+
 define i64 @test_and_large_constant_mask(i64 %x) {
 ; CHECK-LABEL: test_and_large_constant_mask:
 ; CHECK:       # %bb.0: # %entry
@@ -877,10 +1187,10 @@ define i32 @blcic32_branch(i32 %x) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    blcicl %edi, %ebx
-; CHECK-NEXT:    jne .LBB69_2
+; CHECK-NEXT:    jne .LBB89_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    callq bar@PLT
-; CHECK-NEXT:  .LBB69_2:
+; CHECK-NEXT:  .LBB89_2:
 ; CHECK-NEXT:    movl %ebx, %eax
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
@@ -900,10 +1210,10 @@ define i64 @blcic64_branch(i64 %x) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    blcicq %rdi, %rbx
-; CHECK-NEXT:    jne .LBB70_2
+; CHECK-NEXT:    jne .LBB90_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    callq bar@PLT
-; CHECK-NEXT:  .LBB70_2:
+; CHECK-NEXT:  .LBB90_2:
 ; CHECK-NEXT:    movq %rbx, %rax
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
@@ -923,10 +1233,10 @@ define i32 @tzmsk32_branch(i32 %x) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    tzmskl %edi, %ebx
-; CHECK-NEXT:    jne .LBB71_2
+; CHECK-NEXT:    jne .LBB91_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    callq bar@PLT
-; CHECK-NEXT:  .LBB71_2:
+; CHECK-NEXT:  .LBB91_2:
 ; CHECK-NEXT:    movl %ebx, %eax
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
@@ -946,10 +1256,10 @@ define i64 @tzmsk64_branch(i64 %x) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    tzmskq %rdi, %rbx
-; CHECK-NEXT:    jne .LBB72_2
+; CHECK-NEXT:    jne .LBB92_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    callq bar@PLT
-; CHECK-NEXT:  .LBB72_2:
+; CHECK-NEXT:  .LBB92_2:
 ; CHECK-NEXT:    movq %rbx, %rax
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
@@ -969,10 +1279,10 @@ define i32 @blcfill32_branch(i32 %x) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    blcfilll %edi, %ebx
-; CHECK-NEXT:    jne .LBB73_2
+; CHECK-NEXT:    jne .LBB93_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    callq bar at PLT
-; CHECK-NEXT:  .LBB73_2:
+; CHECK-NEXT:  .LBB93_2:
 ; CHECK-NEXT:    movl %ebx, %eax
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
@@ -991,10 +1301,10 @@ define i64 @blcfill64_branch(i64 %x) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rbx
 ; CHECK-NEXT:    blcfillq %rdi, %rbx
-; CHECK-NEXT:    jne .LBB74_2
+; CHECK-NEXT:    jne .LBB94_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    callq bar at PLT
-; CHECK-NEXT:  .LBB74_2:
+; CHECK-NEXT:  .LBB94_2:
 ; CHECK-NEXT:    movq %rbx, %rax
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
