[llvm] ec3218d - [X86] Add cttz/ctlz tests for i686 with CMOV target
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 13 09:51:41 PDT 2020
Author: Simon Pilgrim
Date: 2020-03-13T16:51:13Z
New Revision: ec3218dbee467de2fe84c65e874cbe210b7b2bf4
URL: https://github.com/llvm/llvm-project/commit/ec3218dbee467de2fe84c65e874cbe210b7b2bf4
DIFF: https://github.com/llvm/llvm-project/commit/ec3218dbee467de2fe84c65e874cbe210b7b2bf4.diff
LOG: [X86] Add cttz/ctlz tests for i686 with CMOV target
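The new RUN lines exercise the cttz/ctlz intrinsics on an i686 target with CMOV available. A minimal sketch of the pattern under test, for orientation only (the RUN line is taken verbatim from the patch; the function body is illustrative, while the actual test covers i8/i16/i32/i64 and both settings of the is_zero_undef operand):

    ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefixes=CHECK,X86,X86-CMOV
    declare i64 @llvm.cttz.i64(i64, i1)

    define i64 @cttz_i64(i64 %x) {
      ; Count trailing zeros; the i1 true operand marks a zero input as undefined,
      ; so no explicit zero check is required in the lowered code.
      %r = call i64 @llvm.cttz.i64(i64 %x, i1 true)
      ret i64 %r
    }

With +cmov the 32-bit lowering computes bsf/bsr for both halves of the i64 and selects between them with cmovnel, which is what the new X86-CMOV check prefix captures; X86-NOCMOV keeps the baseline i686 test-and-branch output. The check lines follow the autogenerated format noted at the top of the test and would typically be refreshed with utils/update_llc_test_checks.py (invocation details assumed, per the note in the test file).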
Added:
Modified:
llvm/test/CodeGen/X86/clz.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/clz.ll b/llvm/test/CodeGen/X86/clz.ll
index 7884f3ebcc0c..3e44a8bbbf72 100644
--- a/llvm/test/CodeGen/X86/clz.ll
+++ b/llvm/test/CodeGen/X86/clz.ll
@@ -1,8 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=CHECK --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=CHECK --check-prefix=X64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+bmi,+lzcnt | FileCheck %s --check-prefix=CHECK --check-prefix=X32-CLZ
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi,+lzcnt | FileCheck %s --check-prefix=CHECK --check-prefix=X64-CLZ
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=CHECK,X86,X86-NOCMOV
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefixes=CHECK,X86,X86-CMOV
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+bmi,+lzcnt | FileCheck %s --check-prefixes=CHECK,X86-CLZ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi,+lzcnt | FileCheck %s --check-prefixes=CHECK,X64-CLZ
declare i8 @llvm.cttz.i8(i8, i1)
declare i16 @llvm.cttz.i16(i16, i1)
@@ -15,11 +16,11 @@ declare i32 @llvm.ctlz.i32(i32, i1)
declare i64 @llvm.ctlz.i64(i64, i1)
define i8 @cttz_i8(i8 %x) {
-; X32-LABEL: cttz_i8:
-; X32: # %bb.0:
-; X32-NEXT: bsfl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def $al killed $al killed $eax
-; X32-NEXT: retl
+; X86-LABEL: cttz_i8:
+; X86: # %bb.0:
+; X86-NEXT: bsfl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
;
; X64-LABEL: cttz_i8:
; X64: # %bb.0:
@@ -27,11 +28,11 @@ define i8 @cttz_i8(i8 %x) {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i8:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i8:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: # kill: def $al killed $al killed $eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8:
; X64-CLZ: # %bb.0:
@@ -43,20 +44,20 @@ define i8 @cttz_i8(i8 %x) {
}
define i16 @cttz_i16(i16 %x) {
-; X32-LABEL: cttz_i16:
-; X32: # %bb.0:
-; X32-NEXT: bsfw {{[0-9]+}}(%esp), %ax
-; X32-NEXT: retl
+; X86-LABEL: cttz_i16:
+; X86: # %bb.0:
+; X86-NEXT: bsfw {{[0-9]+}}(%esp), %ax
+; X86-NEXT: retl
;
; X64-LABEL: cttz_i16:
; X64: # %bb.0:
; X64-NEXT: bsfw %di, %ax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i16:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: tzcntw {{[0-9]+}}(%esp), %ax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i16:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: tzcntw {{[0-9]+}}(%esp), %ax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i16:
; X64-CLZ: # %bb.0:
@@ -67,20 +68,20 @@ define i16 @cttz_i16(i16 %x) {
}
define i32 @cttz_i32(i32 %x) {
-; X32-LABEL: cttz_i32:
-; X32: # %bb.0:
-; X32-NEXT: bsfl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: cttz_i32:
+; X86: # %bb.0:
+; X86-NEXT: bsfl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: cttz_i32:
; X64: # %bb.0:
; X64-NEXT: bsfl %edi, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i32:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i32:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i32:
; X64-CLZ: # %bb.0:
@@ -91,40 +92,51 @@ define i32 @cttz_i32(i32 %x) {
}
define i64 @cttz_i64(i64 %x) {
-; X32-LABEL: cttz_i64:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: jne .LBB3_1
-; X32-NEXT: # %bb.2:
-; X32-NEXT: bsfl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl $32, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
-; X32-NEXT: .LBB3_1:
-; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
+; X86-NOCMOV-LABEL: cttz_i64:
+; X86-NOCMOV: # %bb.0:
+; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT: testl %eax, %eax
+; X86-NOCMOV-NEXT: jne .LBB3_1
+; X86-NOCMOV-NEXT: # %bb.2:
+; X86-NOCMOV-NEXT: bsfl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT: addl $32, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+; X86-NOCMOV-NEXT: .LBB3_1:
+; X86-NOCMOV-NEXT: bsfl %eax, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+;
+; X86-CMOV-LABEL: cttz_i64:
+; X86-CMOV: # %bb.0:
+; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-CMOV-NEXT: bsfl %ecx, %edx
+; X86-CMOV-NEXT: bsfl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT: addl $32, %eax
+; X86-CMOV-NEXT: testl %ecx, %ecx
+; X86-CMOV-NEXT: cmovnel %edx, %eax
+; X86-CMOV-NEXT: xorl %edx, %edx
+; X86-CMOV-NEXT: retl
;
; X64-LABEL: cttz_i64:
; X64: # %bb.0:
; X64-NEXT: bsfq %rdi, %rax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i64:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: testl %eax, %eax
-; X32-CLZ-NEXT: jne .LBB3_1
-; X32-CLZ-NEXT: # %bb.2:
-; X32-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: addl $32, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
-; X32-CLZ-NEXT: .LBB3_1:
-; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i64:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: testl %eax, %eax
+; X86-CLZ-NEXT: jne .LBB3_1
+; X86-CLZ-NEXT: # %bb.2:
+; X86-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: addl $32, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
+; X86-CLZ-NEXT: .LBB3_1:
+; X86-CLZ-NEXT: tzcntl %eax, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i64:
; X64-CLZ: # %bb.0:
@@ -135,13 +147,13 @@ define i64 @cttz_i64(i64 %x) {
}
define i8 @ctlz_i8(i8 %x) {
-; X32-LABEL: ctlz_i8:
-; X32: # %bb.0:
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: bsrl %eax, %eax
-; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: def $al killed $al killed $eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_i8:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bsrl %eax, %eax
+; X86-NEXT: xorl $7, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_i8:
; X64: # %bb.0:
@@ -151,13 +163,13 @@ define i8 @ctlz_i8(i8 %x) {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i8:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: lzcntl %eax, %eax
-; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i8:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: lzcntl %eax, %eax
+; X86-CLZ-NEXT: addl $-24, %eax
+; X86-CLZ-NEXT: # kill: def $al killed $al killed $eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8:
; X64-CLZ: # %bb.0:
@@ -171,12 +183,12 @@ define i8 @ctlz_i8(i8 %x) {
}
define i16 @ctlz_i16(i16 %x) {
-; X32-LABEL: ctlz_i16:
-; X32: # %bb.0:
-; X32-NEXT: bsrw {{[0-9]+}}(%esp), %ax
-; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: def $ax killed $ax killed $eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_i16:
+; X86: # %bb.0:
+; X86-NEXT: bsrw {{[0-9]+}}(%esp), %ax
+; X86-NEXT: xorl $15, %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_i16:
; X64: # %bb.0:
@@ -185,10 +197,10 @@ define i16 @ctlz_i16(i16 %x) {
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i16:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: lzcntw {{[0-9]+}}(%esp), %ax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i16:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: lzcntw {{[0-9]+}}(%esp), %ax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i16:
; X64-CLZ: # %bb.0:
@@ -199,11 +211,11 @@ define i16 @ctlz_i16(i16 %x) {
}
define i32 @ctlz_i32(i32 %x) {
-; X32-LABEL: ctlz_i32:
-; X32: # %bb.0:
-; X32-NEXT: bsrl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_i32:
+; X86: # %bb.0:
+; X86-NEXT: bsrl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl $31, %eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_i32:
; X64: # %bb.0:
@@ -211,10 +223,10 @@ define i32 @ctlz_i32(i32 %x) {
; X64-NEXT: xorl $31, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i32:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i32:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i32:
; X64-CLZ: # %bb.0:
@@ -225,22 +237,35 @@ define i32 @ctlz_i32(i32 %x) {
}
define i64 @ctlz_i64(i64 %x) {
-; X32-LABEL: ctlz_i64:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: jne .LBB7_1
-; X32-NEXT: # %bb.2:
-; X32-NEXT: bsrl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: addl $32, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
-; X32-NEXT: .LBB7_1:
-; X32-NEXT: bsrl %eax, %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
+; X86-NOCMOV-LABEL: ctlz_i64:
+; X86-NOCMOV: # %bb.0:
+; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT: testl %eax, %eax
+; X86-NOCMOV-NEXT: jne .LBB7_1
+; X86-NOCMOV-NEXT: # %bb.2:
+; X86-NOCMOV-NEXT: bsrl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT: xorl $31, %eax
+; X86-NOCMOV-NEXT: addl $32, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+; X86-NOCMOV-NEXT: .LBB7_1:
+; X86-NOCMOV-NEXT: bsrl %eax, %eax
+; X86-NOCMOV-NEXT: xorl $31, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+;
+; X86-CMOV-LABEL: ctlz_i64:
+; X86-CMOV: # %bb.0:
+; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-CMOV-NEXT: bsrl %ecx, %edx
+; X86-CMOV-NEXT: xorl $31, %edx
+; X86-CMOV-NEXT: bsrl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT: xorl $31, %eax
+; X86-CMOV-NEXT: addl $32, %eax
+; X86-CMOV-NEXT: testl %ecx, %ecx
+; X86-CMOV-NEXT: cmovnel %edx, %eax
+; X86-CMOV-NEXT: xorl %edx, %edx
+; X86-CMOV-NEXT: retl
;
; X64-LABEL: ctlz_i64:
; X64: # %bb.0:
@@ -248,20 +273,20 @@ define i64 @ctlz_i64(i64 %x) {
; X64-NEXT: xorq $63, %rax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i64:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: testl %eax, %eax
-; X32-CLZ-NEXT: jne .LBB7_1
-; X32-CLZ-NEXT: # %bb.2:
-; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: addl $32, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
-; X32-CLZ-NEXT: .LBB7_1:
-; X32-CLZ-NEXT: lzcntl %eax, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i64:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: testl %eax, %eax
+; X86-CLZ-NEXT: jne .LBB7_1
+; X86-CLZ-NEXT: # %bb.2:
+; X86-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: addl $32, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
+; X86-CLZ-NEXT: .LBB7_1:
+; X86-CLZ-NEXT: lzcntl %eax, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i64:
; X64-CLZ: # %bb.0:
@@ -273,21 +298,21 @@ define i64 @ctlz_i64(i64 %x) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i8 @ctlz_i8_zero_test(i8 %n) {
-; X32-LABEL: ctlz_i8_zero_test:
-; X32: # %bb.0:
-; X32-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-NEXT: testb %al, %al
-; X32-NEXT: je .LBB8_1
-; X32-NEXT: # %bb.2: # %cond.false
-; X32-NEXT: movzbl %al, %eax
-; X32-NEXT: bsrl %eax, %eax
-; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: def $al killed $al killed $eax
-; X32-NEXT: retl
-; X32-NEXT: .LBB8_1:
-; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: def $al killed $al killed $eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_i8_zero_test:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: testb %al, %al
+; X86-NEXT: je .LBB8_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: bsrl %eax, %eax
+; X86-NEXT: xorl $7, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB8_1:
+; X86-NEXT: movb $8, %al
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_i8_zero_test:
; X64: # %bb.0:
@@ -304,13 +329,13 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i8_zero_test:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: lzcntl %eax, %eax
-; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i8_zero_test:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: lzcntl %eax, %eax
+; X86-CLZ-NEXT: addl $-24, %eax
+; X86-CLZ-NEXT: # kill: def $al killed $al killed $eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_zero_test:
; X64-CLZ: # %bb.0:
@@ -325,20 +350,20 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i16 @ctlz_i16_zero_test(i16 %n) {
-; X32-LABEL: ctlz_i16_zero_test:
-; X32: # %bb.0:
-; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: testw %ax, %ax
-; X32-NEXT: je .LBB9_1
-; X32-NEXT: # %bb.2: # %cond.false
-; X32-NEXT: bsrw %ax, %ax
-; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: def $ax killed $ax killed $eax
-; X32-NEXT: retl
-; X32-NEXT: .LBB9_1:
-; X32-NEXT: movw $16, %ax
-; X32-NEXT: # kill: def $ax killed $ax killed $eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_i16_zero_test:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testw %ax, %ax
+; X86-NEXT: je .LBB9_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: bsrw %ax, %ax
+; X86-NEXT: xorl $15, %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB9_1:
+; X86-NEXT: movw $16, %ax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_i16_zero_test:
; X64: # %bb.0:
@@ -354,10 +379,10 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i16_zero_test:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: lzcntw {{[0-9]+}}(%esp), %ax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i16_zero_test:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: lzcntw {{[0-9]+}}(%esp), %ax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i16_zero_test:
; X64-CLZ: # %bb.0:
@@ -369,18 +394,18 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i32 @ctlz_i32_zero_test(i32 %n) {
-; X32-LABEL: ctlz_i32_zero_test:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: je .LBB10_1
-; X32-NEXT: # %bb.2: # %cond.false
-; X32-NEXT: bsrl %eax, %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: retl
-; X32-NEXT: .LBB10_1:
-; X32-NEXT: movl $32, %eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_i32_zero_test:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB10_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: bsrl %eax, %eax
+; X86-NEXT: xorl $31, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB10_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_i32_zero_test:
; X64: # %bb.0:
@@ -394,10 +419,10 @@ define i32 @ctlz_i32_zero_test(i32 %n) {
; X64-NEXT: movl $32, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i32_zero_test:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i32_zero_test:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i32_zero_test:
; X64-CLZ: # %bb.0:
@@ -409,27 +434,42 @@ define i32 @ctlz_i32_zero_test(i32 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i64 @ctlz_i64_zero_test(i64 %n) {
-; X32-LABEL: ctlz_i64_zero_test:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: bsrl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl $63, %eax
-; X32-NEXT: je .LBB11_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: .LBB11_2:
-; X32-NEXT: testl %ecx, %ecx
-; X32-NEXT: jne .LBB11_3
-; X32-NEXT: # %bb.4:
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: addl $32, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
-; X32-NEXT: .LBB11_3:
-; X32-NEXT: bsrl %ecx, %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
+; X86-NOCMOV-LABEL: ctlz_i64_zero_test:
+; X86-NOCMOV: # %bb.0:
+; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOCMOV-NEXT: bsrl {{[0-9]+}}(%esp), %edx
+; X86-NOCMOV-NEXT: movl $63, %eax
+; X86-NOCMOV-NEXT: je .LBB11_2
+; X86-NOCMOV-NEXT: # %bb.1:
+; X86-NOCMOV-NEXT: movl %edx, %eax
+; X86-NOCMOV-NEXT: .LBB11_2:
+; X86-NOCMOV-NEXT: testl %ecx, %ecx
+; X86-NOCMOV-NEXT: jne .LBB11_3
+; X86-NOCMOV-NEXT: # %bb.4:
+; X86-NOCMOV-NEXT: xorl $31, %eax
+; X86-NOCMOV-NEXT: addl $32, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+; X86-NOCMOV-NEXT: .LBB11_3:
+; X86-NOCMOV-NEXT: bsrl %ecx, %eax
+; X86-NOCMOV-NEXT: xorl $31, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+;
+; X86-CMOV-LABEL: ctlz_i64_zero_test:
+; X86-CMOV: # %bb.0:
+; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-CMOV-NEXT: bsrl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT: movl $63, %edx
+; X86-CMOV-NEXT: cmovnel %eax, %edx
+; X86-CMOV-NEXT: xorl $31, %edx
+; X86-CMOV-NEXT: addl $32, %edx
+; X86-CMOV-NEXT: bsrl %ecx, %eax
+; X86-CMOV-NEXT: xorl $31, %eax
+; X86-CMOV-NEXT: testl %ecx, %ecx
+; X86-CMOV-NEXT: cmovel %edx, %eax
+; X86-CMOV-NEXT: xorl %edx, %edx
+; X86-CMOV-NEXT: retl
;
; X64-LABEL: ctlz_i64_zero_test:
; X64: # %bb.0:
@@ -443,20 +483,20 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
; X64-NEXT: movl $64, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i64_zero_test:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: testl %eax, %eax
-; X32-CLZ-NEXT: jne .LBB11_1
-; X32-CLZ-NEXT: # %bb.2:
-; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: addl $32, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
-; X32-CLZ-NEXT: .LBB11_1:
-; X32-CLZ-NEXT: lzcntl %eax, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i64_zero_test:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: testl %eax, %eax
+; X86-CLZ-NEXT: jne .LBB11_1
+; X86-CLZ-NEXT: # %bb.2:
+; X86-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: addl $32, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
+; X86-CLZ-NEXT: .LBB11_1:
+; X86-CLZ-NEXT: lzcntl %eax, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i64_zero_test:
; X64-CLZ: # %bb.0:
@@ -468,20 +508,20 @@ define i64 @ctlz_i64_zero_test(i64 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i8 @cttz_i8_zero_test(i8 %n) {
-; X32-LABEL: cttz_i8_zero_test:
-; X32: # %bb.0:
-; X32-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-NEXT: testb %al, %al
-; X32-NEXT: je .LBB12_1
-; X32-NEXT: # %bb.2: # %cond.false
-; X32-NEXT: movzbl %al, %eax
-; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: def $al killed $al killed $eax
-; X32-NEXT: retl
-; X32-NEXT: .LBB12_1:
-; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: def $al killed $al killed $eax
-; X32-NEXT: retl
+; X86-LABEL: cttz_i8_zero_test:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: testb %al, %al
+; X86-NEXT: je .LBB12_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: bsfl %eax, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB12_1:
+; X86-NEXT: movb $8, %al
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
;
; X64-LABEL: cttz_i8_zero_test:
; X64: # %bb.0:
@@ -497,13 +537,13 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i8_zero_test:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movl $256, %eax # imm = 0x100
-; X32-CLZ-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i8_zero_test:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movl $256, %eax # imm = 0x100
+; X86-CLZ-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: tzcntl %eax, %eax
+; X86-CLZ-NEXT: # kill: def $al killed $al killed $eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_zero_test:
; X64-CLZ: # %bb.0:
@@ -517,17 +557,17 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i16 @cttz_i16_zero_test(i16 %n) {
-; X32-LABEL: cttz_i16_zero_test:
-; X32: # %bb.0:
-; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: testw %ax, %ax
-; X32-NEXT: je .LBB13_1
-; X32-NEXT: # %bb.2: # %cond.false
-; X32-NEXT: bsfw %ax, %ax
-; X32-NEXT: retl
-; X32-NEXT: .LBB13_1:
-; X32-NEXT: movw $16, %ax
-; X32-NEXT: retl
+; X86-LABEL: cttz_i16_zero_test:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testw %ax, %ax
+; X86-NEXT: je .LBB13_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: bsfw %ax, %ax
+; X86-NEXT: retl
+; X86-NEXT: .LBB13_1:
+; X86-NEXT: movw $16, %ax
+; X86-NEXT: retl
;
; X64-LABEL: cttz_i16_zero_test:
; X64: # %bb.0:
@@ -540,10 +580,10 @@ define i16 @cttz_i16_zero_test(i16 %n) {
; X64-NEXT: movw $16, %ax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i16_zero_test:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: tzcntw {{[0-9]+}}(%esp), %ax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i16_zero_test:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: tzcntw {{[0-9]+}}(%esp), %ax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i16_zero_test:
; X64-CLZ: # %bb.0:
@@ -555,17 +595,17 @@ define i16 @cttz_i16_zero_test(i16 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i32 @cttz_i32_zero_test(i32 %n) {
-; X32-LABEL: cttz_i32_zero_test:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: je .LBB14_1
-; X32-NEXT: # %bb.2: # %cond.false
-; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: retl
-; X32-NEXT: .LBB14_1:
-; X32-NEXT: movl $32, %eax
-; X32-NEXT: retl
+; X86-LABEL: cttz_i32_zero_test:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB14_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB14_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
;
; X64-LABEL: cttz_i32_zero_test:
; X64: # %bb.0:
@@ -578,10 +618,10 @@ define i32 @cttz_i32_zero_test(i32 %n) {
; X64-NEXT: movl $32, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i32_zero_test:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i32_zero_test:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i32_zero_test:
; X64-CLZ: # %bb.0:
@@ -593,25 +633,37 @@ define i32 @cttz_i32_zero_test(i32 %n) {
; Generate a test and branch to handle zero inputs because bsr/bsf are very slow.
define i64 @cttz_i64_zero_test(i64 %n) {
-; X32-LABEL: cttz_i64_zero_test:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: bsfl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl $32, %eax
-; X32-NEXT: je .LBB15_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: .LBB15_2:
-; X32-NEXT: testl %ecx, %ecx
-; X32-NEXT: jne .LBB15_3
-; X32-NEXT: # %bb.4:
-; X32-NEXT: addl $32, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
-; X32-NEXT: .LBB15_3:
-; X32-NEXT: bsfl %ecx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
+; X86-NOCMOV-LABEL: cttz_i64_zero_test:
+; X86-NOCMOV: # %bb.0:
+; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOCMOV-NEXT: bsfl {{[0-9]+}}(%esp), %edx
+; X86-NOCMOV-NEXT: movl $32, %eax
+; X86-NOCMOV-NEXT: je .LBB15_2
+; X86-NOCMOV-NEXT: # %bb.1:
+; X86-NOCMOV-NEXT: movl %edx, %eax
+; X86-NOCMOV-NEXT: .LBB15_2:
+; X86-NOCMOV-NEXT: testl %ecx, %ecx
+; X86-NOCMOV-NEXT: jne .LBB15_3
+; X86-NOCMOV-NEXT: # %bb.4:
+; X86-NOCMOV-NEXT: addl $32, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+; X86-NOCMOV-NEXT: .LBB15_3:
+; X86-NOCMOV-NEXT: bsfl %ecx, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+;
+; X86-CMOV-LABEL: cttz_i64_zero_test:
+; X86-CMOV: # %bb.0:
+; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT: bsfl {{[0-9]+}}(%esp), %ecx
+; X86-CMOV-NEXT: movl $32, %edx
+; X86-CMOV-NEXT: cmovnel %ecx, %edx
+; X86-CMOV-NEXT: addl $32, %edx
+; X86-CMOV-NEXT: bsfl %eax, %eax
+; X86-CMOV-NEXT: cmovel %edx, %eax
+; X86-CMOV-NEXT: xorl %edx, %edx
+; X86-CMOV-NEXT: retl
;
; X64-LABEL: cttz_i64_zero_test:
; X64: # %bb.0:
@@ -624,20 +676,20 @@ define i64 @cttz_i64_zero_test(i64 %n) {
; X64-NEXT: movl $64, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i64_zero_test:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: testl %eax, %eax
-; X32-CLZ-NEXT: jne .LBB15_1
-; X32-CLZ-NEXT: # %bb.2:
-; X32-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: addl $32, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
-; X32-CLZ-NEXT: .LBB15_1:
-; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i64_zero_test:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: testl %eax, %eax
+; X86-CLZ-NEXT: jne .LBB15_1
+; X86-CLZ-NEXT: # %bb.2:
+; X86-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: addl $32, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
+; X86-CLZ-NEXT: .LBB15_1:
+; X86-CLZ-NEXT: tzcntl %eax, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i64_zero_test:
; X64-CLZ: # %bb.0:
@@ -653,18 +705,18 @@ define i64 @cttz_i64_zero_test(i64 %n) {
; FIXME: The compare and branch are produced late in IR (by CodeGenPrepare), and
; codegen doesn't know how to delete the movl and je.
define i32 @ctlz_i32_fold_cmov(i32 %n) {
-; X32-LABEL: ctlz_i32_fold_cmov:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl $1, %eax
-; X32-NEXT: je .LBB16_1
-; X32-NEXT: # %bb.2: # %cond.false
-; X32-NEXT: bsrl %eax, %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: retl
-; X32-NEXT: .LBB16_1:
-; X32-NEXT: movl $32, %eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_i32_fold_cmov:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: je .LBB16_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: bsrl %eax, %eax
+; X86-NEXT: xorl $31, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB16_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_i32_fold_cmov:
; X64: # %bb.0:
@@ -678,12 +730,12 @@ define i32 @ctlz_i32_fold_cmov(i32 %n) {
; X64-NEXT: movl $32, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i32_fold_cmov:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: orl $1, %eax
-; X32-CLZ-NEXT: lzcntl %eax, %eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i32_fold_cmov:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: orl $1, %eax
+; X86-CLZ-NEXT: lzcntl %eax, %eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i32_fold_cmov:
; X64-CLZ: # %bb.0:
@@ -699,21 +751,21 @@ define i32 @ctlz_i32_fold_cmov(i32 %n) {
; the most significant bit, which is what 'bsr' does natively.
; FIXME: We should probably select BSR instead of LZCNT in these circumstances.
define i32 @ctlz_bsr(i32 %n) {
-; X32-LABEL: ctlz_bsr:
-; X32: # %bb.0:
-; X32-NEXT: bsrl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_bsr:
+; X86: # %bb.0:
+; X86-NEXT: bsrl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_bsr:
; X64: # %bb.0:
; X64-NEXT: bsrl %edi, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_bsr:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: xorl $31, %eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_bsr:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: xorl $31, %eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_bsr:
; X64-CLZ: # %bb.0:
@@ -729,20 +781,20 @@ define i32 @ctlz_bsr(i32 %n) {
; FIXME: The compare and branch are produced late in IR (by CodeGenPrepare), and
; codegen doesn't know how to combine the $32 and $31 into $63.
define i32 @ctlz_bsr_zero_test(i32 %n) {
-; X32-LABEL: ctlz_bsr_zero_test:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: je .LBB18_1
-; X32-NEXT: # %bb.2: # %cond.false
-; X32-NEXT: bsrl %eax, %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: retl
-; X32-NEXT: .LBB18_1:
-; X32-NEXT: movl $32, %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_bsr_zero_test:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB18_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: bsrl %eax, %eax
+; X86-NEXT: xorl $31, %eax
+; X86-NEXT: xorl $31, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB18_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: xorl $31, %eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_bsr_zero_test:
; X64: # %bb.0:
@@ -758,11 +810,11 @@ define i32 @ctlz_bsr_zero_test(i32 %n) {
; X64-NEXT: xorl $31, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_bsr_zero_test:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: xorl $31, %eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_bsr_zero_test:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: lzcntl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: xorl $31, %eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_bsr_zero_test:
; X64-CLZ: # %bb.0:
@@ -775,14 +827,14 @@ define i32 @ctlz_bsr_zero_test(i32 %n) {
}
define i8 @cttz_i8_knownbits(i8 %x) {
-; X32-LABEL: cttz_i8_knownbits:
-; X32: # %bb.0:
-; X32-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-NEXT: orb $2, %al
-; X32-NEXT: movzbl %al, %eax
-; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: def $al killed $al killed $eax
-; X32-NEXT: retl
+; X86-LABEL: cttz_i8_knownbits:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: orb $2, %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: bsfl %eax, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
;
; X64-LABEL: cttz_i8_knownbits:
; X64: # %bb.0:
@@ -792,14 +844,14 @@ define i8 @cttz_i8_knownbits(i8 %x) {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i8_knownbits:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-CLZ-NEXT: orb $2, %al
-; X32-CLZ-NEXT: movzbl %al, %eax
-; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i8_knownbits:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-CLZ-NEXT: orb $2, %al
+; X86-CLZ-NEXT: movzbl %al, %eax
+; X86-CLZ-NEXT: tzcntl %eax, %eax
+; X86-CLZ-NEXT: # kill: def $al killed $al killed $eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_knownbits:
; X64-CLZ: # %bb.0:
@@ -815,15 +867,15 @@ define i8 @cttz_i8_knownbits(i8 %x) {
}
define i8 @ctlz_i8_knownbits(i8 %x) {
-; X32-LABEL: ctlz_i8_knownbits:
-; X32: # %bb.0:
-; X32-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-NEXT: orb $64, %al
-; X32-NEXT: movzbl %al, %eax
-; X32-NEXT: bsrl %eax, %eax
-; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: def $al killed $al killed $eax
-; X32-NEXT: retl
+; X86-LABEL: ctlz_i8_knownbits:
+; X86: # %bb.0:
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: orb $64, %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: bsrl %eax, %eax
+; X86-NEXT: xorl $7, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
;
; X64-LABEL: ctlz_i8_knownbits:
; X64: # %bb.0:
@@ -834,15 +886,15 @@ define i8 @ctlz_i8_knownbits(i8 %x) {
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i8_knownbits:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-CLZ-NEXT: orb $64, %al
-; X32-CLZ-NEXT: movzbl %al, %eax
-; X32-CLZ-NEXT: lzcntl %eax, %eax
-; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: def $al killed $al killed $eax
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i8_knownbits:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-CLZ-NEXT: orb $64, %al
+; X86-CLZ-NEXT: movzbl %al, %eax
+; X86-CLZ-NEXT: lzcntl %eax, %eax
+; X86-CLZ-NEXT: addl $-24, %eax
+; X86-CLZ-NEXT: # kill: def $al killed $al killed $eax
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_knownbits:
; X64-CLZ: # %bb.0:
@@ -862,24 +914,39 @@ define i8 @ctlz_i8_knownbits(i8 %x) {
; Make sure we can detect that the input is non-zero and avoid cmov after BSR
; This is relevant for 32-bit mode without lzcnt
define i64 @ctlz_i64_zero_test_knownneverzero(i64 %n) {
-; X32-LABEL: ctlz_i64_zero_test_knownneverzero:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: jne .LBB21_1
-; X32-NEXT: # %bb.2:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl $1, %eax
-; X32-NEXT: bsrl %eax, %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: orl $32, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
-; X32-NEXT: .LBB21_1:
-; X32-NEXT: bsrl %eax, %eax
-; X32-NEXT: xorl $31, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
+; X86-NOCMOV-LABEL: ctlz_i64_zero_test_knownneverzero:
+; X86-NOCMOV: # %bb.0:
+; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT: testl %eax, %eax
+; X86-NOCMOV-NEXT: jne .LBB21_1
+; X86-NOCMOV-NEXT: # %bb.2:
+; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT: orl $1, %eax
+; X86-NOCMOV-NEXT: bsrl %eax, %eax
+; X86-NOCMOV-NEXT: xorl $31, %eax
+; X86-NOCMOV-NEXT: orl $32, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+; X86-NOCMOV-NEXT: .LBB21_1:
+; X86-NOCMOV-NEXT: bsrl %eax, %eax
+; X86-NOCMOV-NEXT: xorl $31, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+;
+; X86-CMOV-LABEL: ctlz_i64_zero_test_knownneverzero:
+; X86-CMOV: # %bb.0:
+; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-CMOV-NEXT: orl $1, %eax
+; X86-CMOV-NEXT: bsrl %ecx, %edx
+; X86-CMOV-NEXT: xorl $31, %edx
+; X86-CMOV-NEXT: bsrl %eax, %eax
+; X86-CMOV-NEXT: xorl $31, %eax
+; X86-CMOV-NEXT: orl $32, %eax
+; X86-CMOV-NEXT: testl %ecx, %ecx
+; X86-CMOV-NEXT: cmovnel %edx, %eax
+; X86-CMOV-NEXT: xorl %edx, %edx
+; X86-CMOV-NEXT: retl
;
; X64-LABEL: ctlz_i64_zero_test_knownneverzero:
; X64: # %bb.0:
@@ -893,22 +960,22 @@ define i64 @ctlz_i64_zero_test_knownneverzero(i64 %n) {
; X64-NEXT: movl $64, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: ctlz_i64_zero_test_knownneverzero:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: testl %eax, %eax
-; X32-CLZ-NEXT: jne .LBB21_1
-; X32-CLZ-NEXT: # %bb.2:
-; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: orl $1, %eax
-; X32-CLZ-NEXT: lzcntl %eax, %eax
-; X32-CLZ-NEXT: orl $32, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
-; X32-CLZ-NEXT: .LBB21_1:
-; X32-CLZ-NEXT: lzcntl %eax, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: ctlz_i64_zero_test_knownneverzero:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: testl %eax, %eax
+; X86-CLZ-NEXT: jne .LBB21_1
+; X86-CLZ-NEXT: # %bb.2:
+; X86-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: orl $1, %eax
+; X86-CLZ-NEXT: lzcntl %eax, %eax
+; X86-CLZ-NEXT: orl $32, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
+; X86-CLZ-NEXT: .LBB21_1:
+; X86-CLZ-NEXT: lzcntl %eax, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i64_zero_test_knownneverzero:
; X64-CLZ: # %bb.0:
@@ -923,22 +990,35 @@ define i64 @ctlz_i64_zero_test_knownneverzero(i64 %n) {
; Make sure we can detect that the input is non-zero and avoid cmov after BSF
; This is relevant for 32-bit mode without tzcnt
define i64 @cttz_i64_zero_test_knownneverzero(i64 %n) {
-; X32-LABEL: cttz_i64_zero_test_knownneverzero:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: jne .LBB22_1
-; X32-NEXT: # %bb.2:
-; X32-NEXT: movl $-2147483648, %eax # imm = 0x80000000
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: orl $32, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
-; X32-NEXT: .LBB22_1:
-; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
+; X86-NOCMOV-LABEL: cttz_i64_zero_test_knownneverzero:
+; X86-NOCMOV: # %bb.0:
+; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT: testl %eax, %eax
+; X86-NOCMOV-NEXT: jne .LBB22_1
+; X86-NOCMOV-NEXT: # %bb.2:
+; X86-NOCMOV-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-NOCMOV-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NOCMOV-NEXT: bsfl %eax, %eax
+; X86-NOCMOV-NEXT: orl $32, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+; X86-NOCMOV-NEXT: .LBB22_1:
+; X86-NOCMOV-NEXT: bsfl %eax, %eax
+; X86-NOCMOV-NEXT: xorl %edx, %edx
+; X86-NOCMOV-NEXT: retl
+;
+; X86-CMOV-LABEL: cttz_i64_zero_test_knownneverzero:
+; X86-CMOV: # %bb.0:
+; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-CMOV-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-CMOV-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-CMOV-NEXT: bsfl %ecx, %edx
+; X86-CMOV-NEXT: bsfl %eax, %eax
+; X86-CMOV-NEXT: orl $32, %eax
+; X86-CMOV-NEXT: testl %ecx, %ecx
+; X86-CMOV-NEXT: cmovnel %edx, %eax
+; X86-CMOV-NEXT: xorl %edx, %edx
+; X86-CMOV-NEXT: retl
;
; X64-LABEL: cttz_i64_zero_test_knownneverzero:
; X64: # %bb.0:
@@ -952,22 +1032,22 @@ define i64 @cttz_i64_zero_test_knownneverzero(i64 %n) {
; X64-NEXT: movl $64, %eax
; X64-NEXT: retq
;
-; X32-CLZ-LABEL: cttz_i64_zero_test_knownneverzero:
-; X32-CLZ: # %bb.0:
-; X32-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: testl %eax, %eax
-; X32-CLZ-NEXT: jne .LBB22_1
-; X32-CLZ-NEXT: # %bb.2:
-; X32-CLZ-NEXT: movl $-2147483648, %eax # imm = 0x80000000
-; X32-CLZ-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: orl $32, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
-; X32-CLZ-NEXT: .LBB22_1:
-; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: xorl %edx, %edx
-; X32-CLZ-NEXT: retl
+; X86-CLZ-LABEL: cttz_i64_zero_test_knownneverzero:
+; X86-CLZ: # %bb.0:
+; X86-CLZ-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: testl %eax, %eax
+; X86-CLZ-NEXT: jne .LBB22_1
+; X86-CLZ-NEXT: # %bb.2:
+; X86-CLZ-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X86-CLZ-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-CLZ-NEXT: tzcntl %eax, %eax
+; X86-CLZ-NEXT: orl $32, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
+; X86-CLZ-NEXT: .LBB22_1:
+; X86-CLZ-NEXT: tzcntl %eax, %eax
+; X86-CLZ-NEXT: xorl %edx, %edx
+; X86-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i64_zero_test_knownneverzero:
; X64-CLZ: # %bb.0: