[llvm] [RFC][X86] Allow speculative BSR/BSF instructions on targets with CMOV (PR #102885)

via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 12 04:57:46 PDT 2024


llvmbot wrote:



@llvm/pr-subscribers-backend-x86

Author: Simon Pilgrim (RKSimon)

Changes:

Currently, targets without LZCNT/TZCNT won't speculate BSR/BSF instructions, because their result is undefined when the input is zero; we therefore always insert a test+branch to handle the zero-input case.
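
For reference, the pattern under discussion is the ctlz/cttz intrinsic with defined zero-input semantics, as in this minimal IR sketch (the function name is illustrative):

```llvm
; Count leading zeros; is_zero_poison=false, so a zero input is defined
; to return the bit width (32). Without LZCNT, isCheapToSpeculateCtlz()
; currently returns false and this expands to a test+branch around BSR.
define i32 @ctlz32(i32 %x) {
  %c = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
  ret i32 %c
}

declare i32 @llvm.ctlz.i32(i32, i1)
```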

This RFC patch proposes that we allow speculation if the target has CMOV, performing a branchless select instead to handle the zero-input case. This will predominantly help x86-64 targets where the triple is set but we aren't generating code for any particular CPU.
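
At the IR level the proposed lowering behaves roughly like the following branchless select; this is a sketch only, as the actual transform happens during DAG lowering with BSR+CMOV (see the test diffs below):

```llvm
; Branchless model: speculate the ctlz with is_zero_poison=true (matching
; raw BSR semantics), freeze the possibly-poison result, then select the
; constant 32 when the input was zero.
define i32 @ctlz32_branchless(i32 %x) {
  %raw  = freeze i32 %c
  %zero = icmp eq i32 %x, 0
  %res  = select i1 %zero, i32 32, i32 %raw
  ret i32 %res
}

declare i32 @llvm.ctlz.i32(i32, i1)

; where %c is defined before the freeze as:
;   %c = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
```

In the emitted code this shows up as a CMOV of the constant 63 over the BSR result followed by an XOR with 31, so the zero-input case still yields 63 ^ 31 == 32, as the updated check lines show.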

I can't recall the entire history of why we don't already do this. BSR/BSF only set the ZF flag and leave the other flags in an undefined state (at least on AMD CPUs), which can create a false dependency that has to resolve before the CMOV can consume the result; the minor stall that causes is something a well-predicted branch would avoid. Can anyone recall any other reasons?

A more restricted version of this patch would be to handle just the isCheapToSpeculateCttz case and rely on the "REP BSF" expansion we already perform (effectively a hidden TZCNT, followed by a CMOV).
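
That restricted variant would cover cttz IR like the following sketch; the comment reflects my understanding of why the existing expansion is cheap (REP BSF and TZCNT share an encoding):

```llvm
; Count trailing zeros; a zero input is defined to return 32. The "REP BSF"
; expansion emits the TZCNT encoding, so on TZCNT-capable hardware a zero
; input already yields 32, and the CMOV only has to cover older CPUs.
define i32 @cttz32(i32 %x) {
  %c = call i32 @llvm.cttz.i32(i32 %x, i1 false)
  ret i32 %c
}

declare i32 @llvm.cttz.i32(i32, i1)
```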

Any comments?

---

Patch is 69.06 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/102885.diff


13 Files Affected:

- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+2-2) 
- (modified) llvm/test/CodeGen/X86/atomic-bit-test.ll (-1) 
- (modified) llvm/test/CodeGen/X86/bit_ceil.ll (+17-36) 
- (modified) llvm/test/CodeGen/X86/combine-or.ll (+20-27) 
- (modified) llvm/test/CodeGen/X86/ctlo.ll (+90-71) 
- (modified) llvm/test/CodeGen/X86/ctlz.ll (+157-147) 
- (modified) llvm/test/CodeGen/X86/cttz.ll (+22-23) 
- (modified) llvm/test/CodeGen/X86/known-never-zero.ll (+137-370) 
- (modified) llvm/test/CodeGen/X86/lzcnt-cmp.ll (+10-42) 
- (modified) llvm/test/CodeGen/X86/pr57673.ll (+14-36) 
- (modified) llvm/test/CodeGen/X86/pr89877.ll (+4-12) 
- (modified) llvm/test/CodeGen/X86/pr90847.ll (+4-14) 
- (modified) llvm/test/CodeGen/X86/pr92569.ll (+6-10) 


``````````diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b604e85b46e788..8a5b31bbc1f471 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3238,13 +3238,13 @@ bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
 
 bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
   // Speculate cttz only if we can directly use TZCNT or can promote to i32.
-  return Subtarget.hasBMI() ||
+  return Subtarget.hasBMI() || Subtarget.canUseCMOV() ||
          (!Ty->isVectorTy() && Ty->getScalarSizeInBits() < 32);
 }
 
 bool X86TargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
   // Speculate ctlz only if we can directly use LZCNT.
-  return Subtarget.hasLZCNT();
+  return Subtarget.hasLZCNT() || Subtarget.canUseCMOV();
 }
 
 bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {
diff --git a/llvm/test/CodeGen/X86/atomic-bit-test.ll b/llvm/test/CodeGen/X86/atomic-bit-test.ll
index f39c4b5e620d0e..10b6605c3fb05e 100644
--- a/llvm/test/CodeGen/X86/atomic-bit-test.ll
+++ b/llvm/test/CodeGen/X86/atomic-bit-test.ll
@@ -582,7 +582,6 @@ define i32 @split_hoist_and(i32 %0) nounwind {
 ; X64-NEXT:    lock btsl $3, v32(%rip)
 ; X64-NEXT:    setb %al
 ; X64-NEXT:    shll $3, %eax
-; X64-NEXT:    testl %edi, %edi
 ; X64-NEXT:    retq
   %2 = atomicrmw or ptr @v32, i32 8 monotonic, align 4
   %3 = tail call i32 @llvm.ctlz.i32(i32 %0, i1 false)
diff --git a/llvm/test/CodeGen/X86/bit_ceil.ll b/llvm/test/CodeGen/X86/bit_ceil.ll
index 4641c114238f8f..823453087f6180 100644
--- a/llvm/test/CodeGen/X86/bit_ceil.ll
+++ b/llvm/test/CodeGen/X86/bit_ceil.ll
@@ -8,16 +8,12 @@
 define i32 @bit_ceil_i32(i32 %x) {
 ; NOBMI-LABEL: bit_ceil_i32:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    movl %edi, %eax
-; NOBMI-NEXT:    decl %eax
-; NOBMI-NEXT:    je .LBB0_1
-; NOBMI-NEXT:  # %bb.2: # %cond.false
-; NOBMI-NEXT:    bsrl %eax, %ecx
+; NOBMI-NEXT:    # kill: def $edi killed $edi def $rdi
+; NOBMI-NEXT:    leal -1(%rdi), %eax
+; NOBMI-NEXT:    bsrl %eax, %eax
+; NOBMI-NEXT:    movl $63, %ecx
+; NOBMI-NEXT:    cmovnel %eax, %ecx
 ; NOBMI-NEXT:    xorl $31, %ecx
-; NOBMI-NEXT:    jmp .LBB0_3
-; NOBMI-NEXT:  .LBB0_1:
-; NOBMI-NEXT:    movl $32, %ecx
-; NOBMI-NEXT:  .LBB0_3: # %cond.end
 ; NOBMI-NEXT:    negb %cl
 ; NOBMI-NEXT:    movl $1, %edx
 ; NOBMI-NEXT:    movl $1, %eax
@@ -51,15 +47,10 @@ define i32 @bit_ceil_i32(i32 %x) {
 define i32 @bit_ceil_i32_plus1(i32 noundef %x) {
 ; NOBMI-LABEL: bit_ceil_i32_plus1:
 ; NOBMI:       # %bb.0: # %entry
-; NOBMI-NEXT:    testl %edi, %edi
-; NOBMI-NEXT:    je .LBB1_1
-; NOBMI-NEXT:  # %bb.2: # %cond.false
-; NOBMI-NEXT:    bsrl %edi, %ecx
+; NOBMI-NEXT:    bsrl %edi, %eax
+; NOBMI-NEXT:    movl $63, %ecx
+; NOBMI-NEXT:    cmovnel %eax, %ecx
 ; NOBMI-NEXT:    xorl $31, %ecx
-; NOBMI-NEXT:    jmp .LBB1_3
-; NOBMI-NEXT:  .LBB1_1:
-; NOBMI-NEXT:    movl $32, %ecx
-; NOBMI-NEXT:  .LBB1_3: # %cond.end
 ; NOBMI-NEXT:    negb %cl
 ; NOBMI-NEXT:    movl $1, %edx
 ; NOBMI-NEXT:    movl $1, %eax
@@ -94,16 +85,11 @@ entry:
 define i64 @bit_ceil_i64(i64 %x) {
 ; NOBMI-LABEL: bit_ceil_i64:
 ; NOBMI:       # %bb.0:
-; NOBMI-NEXT:    movq %rdi, %rax
-; NOBMI-NEXT:    decq %rax
-; NOBMI-NEXT:    je .LBB2_1
-; NOBMI-NEXT:  # %bb.2: # %cond.false
-; NOBMI-NEXT:    bsrq %rax, %rcx
-; NOBMI-NEXT:    xorq $63, %rcx
-; NOBMI-NEXT:    jmp .LBB2_3
-; NOBMI-NEXT:  .LBB2_1:
-; NOBMI-NEXT:    movl $64, %ecx
-; NOBMI-NEXT:  .LBB2_3: # %cond.end
+; NOBMI-NEXT:    leaq -1(%rdi), %rax
+; NOBMI-NEXT:    bsrq %rax, %rax
+; NOBMI-NEXT:    movl $127, %ecx
+; NOBMI-NEXT:    cmovneq %rax, %rcx
+; NOBMI-NEXT:    xorl $63, %ecx
 ; NOBMI-NEXT:    negb %cl
 ; NOBMI-NEXT:    movl $1, %edx
 ; NOBMI-NEXT:    movl $1, %eax
@@ -136,15 +122,10 @@ define i64 @bit_ceil_i64(i64 %x) {
 define i64 @bit_ceil_i64_plus1(i64 noundef %x) {
 ; NOBMI-LABEL: bit_ceil_i64_plus1:
 ; NOBMI:       # %bb.0: # %entry
-; NOBMI-NEXT:    testq %rdi, %rdi
-; NOBMI-NEXT:    je .LBB3_1
-; NOBMI-NEXT:  # %bb.2: # %cond.false
-; NOBMI-NEXT:    bsrq %rdi, %rcx
-; NOBMI-NEXT:    xorq $63, %rcx
-; NOBMI-NEXT:    jmp .LBB3_3
-; NOBMI-NEXT:  .LBB3_1:
-; NOBMI-NEXT:    movl $64, %ecx
-; NOBMI-NEXT:  .LBB3_3: # %cond.end
+; NOBMI-NEXT:    bsrq %rdi, %rax
+; NOBMI-NEXT:    movl $127, %ecx
+; NOBMI-NEXT:    cmovneq %rax, %rcx
+; NOBMI-NEXT:    xorl $63, %ecx
 ; NOBMI-NEXT:    negb %cl
 ; NOBMI-NEXT:    movl $1, %edx
 ; NOBMI-NEXT:    movl $1, %eax
diff --git a/llvm/test/CodeGen/X86/combine-or.ll b/llvm/test/CodeGen/X86/combine-or.ll
index 3b2102f46a297a..4060355495eb3b 100644
--- a/llvm/test/CodeGen/X86/combine-or.ll
+++ b/llvm/test/CodeGen/X86/combine-or.ll
@@ -213,21 +213,18 @@ define i64 @PR89533(<64 x i8> %a0) {
 ; SSE-NEXT:    shll $16, %ecx
 ; SSE-NEXT:    orl %eax, %ecx
 ; SSE-NEXT:    pcmpeqb %xmm4, %xmm2
-; SSE-NEXT:    pmovmskb %xmm2, %edx
-; SSE-NEXT:    xorl $65535, %edx # imm = 0xFFFF
+; SSE-NEXT:    pmovmskb %xmm2, %eax
+; SSE-NEXT:    xorl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT:    pcmpeqb %xmm4, %xmm3
-; SSE-NEXT:    pmovmskb %xmm3, %eax
-; SSE-NEXT:    notl %eax
-; SSE-NEXT:    shll $16, %eax
-; SSE-NEXT:    orl %edx, %eax
-; SSE-NEXT:    shlq $32, %rax
-; SSE-NEXT:    orq %rcx, %rax
-; SSE-NEXT:    je .LBB11_2
-; SSE-NEXT:  # %bb.1: # %cond.false
-; SSE-NEXT:    rep bsfq %rax, %rax
-; SSE-NEXT:    retq
-; SSE-NEXT:  .LBB11_2: # %cond.end
+; SSE-NEXT:    pmovmskb %xmm3, %edx
+; SSE-NEXT:    notl %edx
+; SSE-NEXT:    shll $16, %edx
+; SSE-NEXT:    orl %eax, %edx
+; SSE-NEXT:    shlq $32, %rdx
+; SSE-NEXT:    orq %rcx, %rdx
+; SSE-NEXT:    bsfq %rdx, %rcx
 ; SSE-NEXT:    movl $64, %eax
+; SSE-NEXT:    cmovneq %rcx, %rax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: PR89533:
@@ -243,23 +240,19 @@ define i64 @PR89533(<64 x i8> %a0) {
 ; AVX1-NEXT:    shll $16, %ecx
 ; AVX1-NEXT:    orl %eax, %ecx
 ; AVX1-NEXT:    vpcmpeqb %xmm2, %xmm1, %xmm0
-; AVX1-NEXT:    vpmovmskb %xmm0, %edx
-; AVX1-NEXT:    xorl $65535, %edx # imm = 0xFFFF
+; AVX1-NEXT:    vpmovmskb %xmm0, %eax
+; AVX1-NEXT:    xorl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm0
 ; AVX1-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovmskb %xmm0, %eax
-; AVX1-NEXT:    notl %eax
-; AVX1-NEXT:    shll $16, %eax
-; AVX1-NEXT:    orl %edx, %eax
-; AVX1-NEXT:    shlq $32, %rax
-; AVX1-NEXT:    orq %rcx, %rax
-; AVX1-NEXT:    je .LBB11_2
-; AVX1-NEXT:  # %bb.1: # %cond.false
-; AVX1-NEXT:    rep bsfq %rax, %rax
-; AVX1-NEXT:    vzeroupper
-; AVX1-NEXT:    retq
-; AVX1-NEXT:  .LBB11_2: # %cond.end
+; AVX1-NEXT:    vpmovmskb %xmm0, %edx
+; AVX1-NEXT:    notl %edx
+; AVX1-NEXT:    shll $16, %edx
+; AVX1-NEXT:    orl %eax, %edx
+; AVX1-NEXT:    shlq $32, %rdx
+; AVX1-NEXT:    orq %rcx, %rdx
+; AVX1-NEXT:    bsfq %rdx, %rcx
 ; AVX1-NEXT:    movl $64, %eax
+; AVX1-NEXT:    cmovneq %rcx, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/ctlo.ll b/llvm/test/CodeGen/X86/ctlo.ll
index bb80279e28f3d3..f383c9a2544fca 100644
--- a/llvm/test/CodeGen/X86/ctlo.ll
+++ b/llvm/test/CodeGen/X86/ctlo.ll
@@ -13,36 +13,44 @@ declare i32 @llvm.ctlz.i32(i32, i1)
 declare i64 @llvm.ctlz.i64(i64, i1)
 
 define i8 @ctlo_i8(i8 %x) {
-; X86-LABEL: ctlo_i8:
-; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    xorb $-1, %al
-; X86-NEXT:    je .LBB0_1
-; X86-NEXT:  # %bb.2: # %cond.false
-; X86-NEXT:    movzbl %al, %eax
-; X86-NEXT:    bsrl %eax, %eax
-; X86-NEXT:    xorl $7, %eax
-; X86-NEXT:    # kill: def $al killed $al killed $eax
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_1:
-; X86-NEXT:    movb $8, %al
-; X86-NEXT:    # kill: def $al killed $al killed $eax
-; X86-NEXT:    retl
+; X86-NOCMOV-LABEL: ctlo_i8:
+; X86-NOCMOV:       # %bb.0:
+; X86-NOCMOV-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT:    xorb $-1, %al
+; X86-NOCMOV-NEXT:    je .LBB0_1
+; X86-NOCMOV-NEXT:  # %bb.2: # %cond.false
+; X86-NOCMOV-NEXT:    movzbl %al, %eax
+; X86-NOCMOV-NEXT:    bsrl %eax, %eax
+; X86-NOCMOV-NEXT:    xorl $7, %eax
+; X86-NOCMOV-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NOCMOV-NEXT:    retl
+; X86-NOCMOV-NEXT:  .LBB0_1:
+; X86-NOCMOV-NEXT:    movb $8, %al
+; X86-NOCMOV-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NOCMOV-NEXT:    retl
+;
+; X86-CMOV-LABEL: ctlo_i8:
+; X86-CMOV:       # %bb.0:
+; X86-CMOV-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT:    notb %al
+; X86-CMOV-NEXT:    movzbl %al, %eax
+; X86-CMOV-NEXT:    bsrl %eax, %ecx
+; X86-CMOV-NEXT:    movl $15, %eax
+; X86-CMOV-NEXT:    cmovnel %ecx, %eax
+; X86-CMOV-NEXT:    xorl $7, %eax
+; X86-CMOV-NEXT:    # kill: def $al killed $al killed $eax
+; X86-CMOV-NEXT:    retl
 ;
 ; X64-LABEL: ctlo_i8:
 ; X64:       # %bb.0:
-; X64-NEXT:    xorb $-1, %dil
-; X64-NEXT:    je .LBB0_1
-; X64-NEXT:  # %bb.2: # %cond.false
+; X64-NEXT:    notb %dil
 ; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    bsrl %eax, %eax
+; X64-NEXT:    bsrl %eax, %ecx
+; X64-NEXT:    movl $15, %eax
+; X64-NEXT:    cmovnel %ecx, %eax
 ; X64-NEXT:    xorl $7, %eax
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
-; X64-NEXT:  .LBB0_1:
-; X64-NEXT:    movb $8, %al
-; X64-NEXT:    # kill: def $al killed $al killed $eax
-; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlo_i8:
 ; X86-CLZ:       # %bb.0:
@@ -111,34 +119,41 @@ define i8 @ctlo_i8_undef(i8 %x) {
 }
 
 define i16 @ctlo_i16(i16 %x) {
-; X86-LABEL: ctlo_i16:
-; X86:       # %bb.0:
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    xorw $-1, %ax
-; X86-NEXT:    je .LBB2_1
-; X86-NEXT:  # %bb.2: # %cond.false
-; X86-NEXT:    bsrw %ax, %ax
-; X86-NEXT:    xorl $15, %eax
-; X86-NEXT:    # kill: def $ax killed $ax killed $eax
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB2_1:
-; X86-NEXT:    movw $16, %ax
-; X86-NEXT:    # kill: def $ax killed $ax killed $eax
-; X86-NEXT:    retl
+; X86-NOCMOV-LABEL: ctlo_i16:
+; X86-NOCMOV:       # %bb.0:
+; X86-NOCMOV-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT:    xorw $-1, %ax
+; X86-NOCMOV-NEXT:    je .LBB2_1
+; X86-NOCMOV-NEXT:  # %bb.2: # %cond.false
+; X86-NOCMOV-NEXT:    bsrw %ax, %ax
+; X86-NOCMOV-NEXT:    xorl $15, %eax
+; X86-NOCMOV-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NOCMOV-NEXT:    retl
+; X86-NOCMOV-NEXT:  .LBB2_1:
+; X86-NOCMOV-NEXT:    movw $16, %ax
+; X86-NOCMOV-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NOCMOV-NEXT:    retl
+;
+; X86-CMOV-LABEL: ctlo_i16:
+; X86-CMOV:       # %bb.0:
+; X86-CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT:    notl %eax
+; X86-CMOV-NEXT:    bsrw %ax, %cx
+; X86-CMOV-NEXT:    movw $31, %ax
+; X86-CMOV-NEXT:    cmovnew %cx, %ax
+; X86-CMOV-NEXT:    xorl $15, %eax
+; X86-CMOV-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-CMOV-NEXT:    retl
 ;
 ; X64-LABEL: ctlo_i16:
 ; X64:       # %bb.0:
-; X64-NEXT:    xorw $-1, %di
-; X64-NEXT:    je .LBB2_1
-; X64-NEXT:  # %bb.2: # %cond.false
-; X64-NEXT:    bsrw %di, %ax
+; X64-NEXT:    notl %edi
+; X64-NEXT:    bsrw %di, %cx
+; X64-NEXT:    movw $31, %ax
+; X64-NEXT:    cmovnew %cx, %ax
 ; X64-NEXT:    xorl $15, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
-; X64-NEXT:  .LBB2_1:
-; X64-NEXT:    movw $16, %ax
-; X64-NEXT:    # kill: def $ax killed $ax killed $eax
-; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlo_i16:
 ; X86-CLZ:       # %bb.0:
@@ -193,30 +208,37 @@ define i16 @ctlo_i16_undef(i16 %x) {
 }
 
 define i32 @ctlo_i32(i32 %x) {
-; X86-LABEL: ctlo_i32:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    xorl $-1, %eax
-; X86-NEXT:    je .LBB4_1
-; X86-NEXT:  # %bb.2: # %cond.false
-; X86-NEXT:    bsrl %eax, %eax
-; X86-NEXT:    xorl $31, %eax
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB4_1:
-; X86-NEXT:    movl $32, %eax
-; X86-NEXT:    retl
+; X86-NOCMOV-LABEL: ctlo_i32:
+; X86-NOCMOV:       # %bb.0:
+; X86-NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT:    xorl $-1, %eax
+; X86-NOCMOV-NEXT:    je .LBB4_1
+; X86-NOCMOV-NEXT:  # %bb.2: # %cond.false
+; X86-NOCMOV-NEXT:    bsrl %eax, %eax
+; X86-NOCMOV-NEXT:    xorl $31, %eax
+; X86-NOCMOV-NEXT:    retl
+; X86-NOCMOV-NEXT:  .LBB4_1:
+; X86-NOCMOV-NEXT:    movl $32, %eax
+; X86-NOCMOV-NEXT:    retl
+;
+; X86-CMOV-LABEL: ctlo_i32:
+; X86-CMOV:       # %bb.0:
+; X86-CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT:    notl %eax
+; X86-CMOV-NEXT:    bsrl %eax, %ecx
+; X86-CMOV-NEXT:    movl $63, %eax
+; X86-CMOV-NEXT:    cmovnel %ecx, %eax
+; X86-CMOV-NEXT:    xorl $31, %eax
+; X86-CMOV-NEXT:    retl
 ;
 ; X64-LABEL: ctlo_i32:
 ; X64:       # %bb.0:
-; X64-NEXT:    xorl $-1, %edi
-; X64-NEXT:    je .LBB4_1
-; X64-NEXT:  # %bb.2: # %cond.false
-; X64-NEXT:    bsrl %edi, %eax
+; X64-NEXT:    notl %edi
+; X64-NEXT:    bsrl %edi, %ecx
+; X64-NEXT:    movl $63, %eax
+; X64-NEXT:    cmovnel %ecx, %eax
 ; X64-NEXT:    xorl $31, %eax
 ; X64-NEXT:    retq
-; X64-NEXT:  .LBB4_1:
-; X64-NEXT:    movl $32, %eax
-; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlo_i32:
 ; X86-CLZ:       # %bb.0:
@@ -314,15 +336,12 @@ define i64 @ctlo_i64(i64 %x) {
 ;
 ; X64-LABEL: ctlo_i64:
 ; X64:       # %bb.0:
-; X64-NEXT:    xorq $-1, %rdi
-; X64-NEXT:    je .LBB6_1
-; X64-NEXT:  # %bb.2: # %cond.false
-; X64-NEXT:    bsrq %rdi, %rax
+; X64-NEXT:    notq %rdi
+; X64-NEXT:    bsrq %rdi, %rcx
+; X64-NEXT:    movl $127, %eax
+; X64-NEXT:    cmovneq %rcx, %rax
 ; X64-NEXT:    xorq $63, %rax
 ; X64-NEXT:    retq
-; X64-NEXT:  .LBB6_1:
-; X64-NEXT:    movl $64, %eax
-; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlo_i64:
 ; X86-CLZ:       # %bb.0:
diff --git a/llvm/test/CodeGen/X86/ctlz.ll b/llvm/test/CodeGen/X86/ctlz.ll
index d8f83502bd849a..6635be18b0f7a7 100644
--- a/llvm/test/CodeGen/X86/ctlz.ll
+++ b/llvm/test/CodeGen/X86/ctlz.ll
@@ -218,36 +218,41 @@ define i64 @ctlz_i64(i64 %x) {
 
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i8 @ctlz_i8_zero_test(i8 %n) {
-; X86-LABEL: ctlz_i8_zero_test:
-; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    testb %al, %al
-; X86-NEXT:    je .LBB4_1
-; X86-NEXT:  # %bb.2: # %cond.false
-; X86-NEXT:    movzbl %al, %eax
-; X86-NEXT:    bsrl %eax, %eax
-; X86-NEXT:    xorl $7, %eax
-; X86-NEXT:    # kill: def $al killed $al killed $eax
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB4_1:
-; X86-NEXT:    movb $8, %al
-; X86-NEXT:    # kill: def $al killed $al killed $eax
-; X86-NEXT:    retl
+; X86-NOCMOV-LABEL: ctlz_i8_zero_test:
+; X86-NOCMOV:       # %bb.0:
+; X86-NOCMOV-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT:    testb %al, %al
+; X86-NOCMOV-NEXT:    je .LBB4_1
+; X86-NOCMOV-NEXT:  # %bb.2: # %cond.false
+; X86-NOCMOV-NEXT:    movzbl %al, %eax
+; X86-NOCMOV-NEXT:    bsrl %eax, %eax
+; X86-NOCMOV-NEXT:    xorl $7, %eax
+; X86-NOCMOV-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NOCMOV-NEXT:    retl
+; X86-NOCMOV-NEXT:  .LBB4_1:
+; X86-NOCMOV-NEXT:    movb $8, %al
+; X86-NOCMOV-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NOCMOV-NEXT:    retl
+;
+; X86-CMOV-LABEL: ctlz_i8_zero_test:
+; X86-CMOV:       # %bb.0:
+; X86-CMOV-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT:    bsrl %eax, %ecx
+; X86-CMOV-NEXT:    movl $15, %eax
+; X86-CMOV-NEXT:    cmovnel %ecx, %eax
+; X86-CMOV-NEXT:    xorl $7, %eax
+; X86-CMOV-NEXT:    # kill: def $al killed $al killed $eax
+; X86-CMOV-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i8_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    testb %dil, %dil
-; X64-NEXT:    je .LBB4_1
-; X64-NEXT:  # %bb.2: # %cond.false
 ; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    bsrl %eax, %eax
+; X64-NEXT:    bsrl %eax, %ecx
+; X64-NEXT:    movl $15, %eax
+; X64-NEXT:    cmovnel %ecx, %eax
 ; X64-NEXT:    xorl $7, %eax
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
-; X64-NEXT:  .LBB4_1:
-; X64-NEXT:    movb $8, %al
-; X64-NEXT:    # kill: def $al killed $al killed $eax
-; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlz_i8_zero_test:
 ; X86-CLZ:       # %bb.0:
@@ -286,34 +291,38 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
 
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i16 @ctlz_i16_zero_test(i16 %n) {
-; X86-LABEL: ctlz_i16_zero_test:
-; X86:       # %bb.0:
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    testw %ax, %ax
-; X86-NEXT:    je .LBB5_1
-; X86-NEXT:  # %bb.2: # %cond.false
-; X86-NEXT:    bsrw %ax, %ax
-; X86-NEXT:    xorl $15, %eax
-; X86-NEXT:    # kill: def $ax killed $ax killed $eax
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB5_1:
-; X86-NEXT:    movw $16, %ax
-; X86-NEXT:    # kill: def $ax killed $ax killed $eax
-; X86-NEXT:    retl
+; X86-NOCMOV-LABEL: ctlz_i16_zero_test:
+; X86-NOCMOV:       # %bb.0:
+; X86-NOCMOV-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT:    testw %ax, %ax
+; X86-NOCMOV-NEXT:    je .LBB5_1
+; X86-NOCMOV-NEXT:  # %bb.2: # %cond.false
+; X86-NOCMOV-NEXT:    bsrw %ax, %ax
+; X86-NOCMOV-NEXT:    xorl $15, %eax
+; X86-NOCMOV-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NOCMOV-NEXT:    retl
+; X86-NOCMOV-NEXT:  .LBB5_1:
+; X86-NOCMOV-NEXT:    movw $16, %ax
+; X86-NOCMOV-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NOCMOV-NEXT:    retl
+;
+; X86-CMOV-LABEL: ctlz_i16_zero_test:
+; X86-CMOV:       # %bb.0:
+; X86-CMOV-NEXT:    bsrw {{[0-9]+}}(%esp), %cx
+; X86-CMOV-NEXT:    movw $31, %ax
+; X86-CMOV-NEXT:    cmovnew %cx, %ax
+; X86-CMOV-NEXT:    xorl $15, %eax
+; X86-CMOV-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-CMOV-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i16_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    testw %di, %di
-; X64-NEXT:    je .LBB5_1
-; X64-NEXT:  # %bb.2: # %cond.false
-; X64-NEXT:    bsrw %di, %ax
+; X64-NEXT:    bsrw %di, %cx
+; X64-NEXT:    movw $31, %ax
+; X64-NEXT:    cmovnew %cx, %ax
 ; X64-NEXT:    xorl $15, %eax
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
-; X64-NEXT:  .LBB5_1:
-; X64-NEXT:    movw $16, %ax
-; X64-NEXT:    # kill: def $ax killed $ax killed $eax
-; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlz_i16_zero_test:
 ; X86-CLZ:       # %bb.0:
@@ -340,30 +349,34 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
 
 ; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
 define i32 @ctlz_i32_zero_test(i32 %n) {
-; X86-LABEL: ctlz_i32_zero_test:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    testl %eax, %eax
-; X86-NEXT:    je .LBB6_1
-; X86-NEXT:  # %bb.2: # %cond.false
-; X86-NEXT:    bsrl %eax, %eax
-; X86-NEXT:    xorl $31, %eax
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB6_1:
-; X86-NEXT:    movl $32, %eax
-; X86-NEXT:    retl
+; X86-NOCMOV-LABEL: ctlz_i32_zero_test:
+; X86-NOCMOV:       # %bb.0:
+; X86-NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT:    testl %eax, %eax
+; X86-NOCMOV-NEXT:    je .LBB6_1
+; X86-NOCMOV-NEXT:  # %bb.2: # %cond.false
+; X86-NOCMOV-NEXT:    bsrl %eax, %eax
+; X86-NOCMOV-NEXT:    xorl $31, %eax
+; X86-NOCMOV-NEXT:    retl
+; X86-NOCMOV-NEXT:  .LBB6_1:
+; X86-NOCMOV-NEXT:    movl $32, %eax
+; X86-NOCMOV-NEXT:    retl
+;
+; X86-CMOV-LABEL: ctlz_i32_zero_test:
+; X86-CMOV:       # %bb.0:
+; X86-CMOV-NEXT:    bsrl {{[0-9]+}}(%esp), %ecx
+; X86-CMOV-NEXT:    movl $63, %eax
+; X86-CMOV-NEXT:    cmovnel %ecx, %eax
+; X86-CMOV-NEXT:    xorl $31, %eax
+; X86-CMOV-NEXT:    retl
 ;
 ; X64-LABEL: ctlz_i32_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    testl %edi, %edi
-; X64-NEXT:    je .LBB6_1
-; X64-NEXT:  # %bb.2: # %cond.false
-; X64-NEXT:    bsrl %edi, %eax
+; X64-NEXT:    bsrl %edi, %ecx
+; X64-NEXT:    movl $63, %eax
+; X64-NEXT:    cmovnel %ecx, %eax
 ; X64-NEXT:    xorl $31, %eax
 ; X64-NEXT:    retq
-; X64-NEXT:  .LBB6_1:
-; X64-NEXT:    movl $32, %eax
-; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlz_i32_zero_test:
 ; X86-CLZ:       # %bb.0:
@@ -429,15 +442,11 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
 ;
 ; X64-LABEL: ctlz_i64_zero_test:
 ; X64:       # %bb.0:
-; X64-NEXT:    testq %rdi, %rdi
-; X64-NEXT:    je .LBB7_1
-; X64-NEXT:  # %bb.2: # %cond.false
-; X64-NEXT:    bsrq %rdi, %rax
+; X64-NEXT:    bsrq %rdi, %rcx
+; X64-NEXT:    movl $127, %eax
+; X64-NEXT:    cmovneq %rcx, %rax
 ; X64-NEXT:    xorq $63, %rax
 ; X64-NEXT:    retq
-; X64-NEXT:  .LBB7_1:
-; X64-NEXT:    movl $64, %eax
-; X64-NEXT:    retq
 ;
 ; X86-CLZ-LABEL: ctlz_i64_zero_test:
 ; X86-CLZ:       # %bb.0:
@@ -580,33 +589,33 @@ define i32 @ctlz_bsr(i32 %n) {
 ; FIXM...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/102885


More information about the llvm-commits mailing list