[llvm] 6c6fe4b - [X86] known-never-zero.ll - add 32-bit test coverage
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Mar 24 04:34:06 PDT 2024
Author: Simon Pilgrim
Date: 2024-03-24T11:33:51Z
New Revision: 6c6fe4b2aea8631001b11abee62146d4aca01cee
URL: https://github.com/llvm/llvm-project/commit/6c6fe4b2aea8631001b11abee62146d4aca01cee
DIFF: https://github.com/llvm/llvm-project/commit/6c6fe4b2aea8631001b11abee62146d4aca01cee.diff
LOG: [X86] known-never-zero.ll - add 32-bit test coverage
Enabled vector coverage as well: i686+SSE2 and x86_64+AVX
Should improve test quality for #85722
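For context, the functions in this file exercise llvm.cttz.i32 on operands that can (or cannot) be proven non-zero, which determines whether the backend may drop the zero-input guard. A minimal sketch of the pattern being covered (illustrative only, not part of the commit; the function name is made up):

declare i32 @llvm.cttz.i32(i32, i1)

; Forcing bit 0 with 'or %x, 1' makes the operand provably non-zero, so the
; lowering can use a bare 'rep bsfl' instead of branching on a zero input.
define i32 @cttz_of_known_nonzero(i32 %x) {
  %nz = or i32 %x, 1
  %r = call i32 @llvm.cttz.i32(i32 %nz, i1 false)
  ret i32 %r
}

The CHECK lines in the diff below were regenerated with utils/update_llc_test_checks.py for both new RUN configurations.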
Added:
Modified:
llvm/test/CodeGen/X86/known-never-zero.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/known-never-zero.ll b/llvm/test/CodeGen/X86/known-never-zero.ll
index cc9862769f2b66..423516bc3271eb 100644
--- a/llvm/test/CodeGen/X86/known-never-zero.ll
+++ b/llvm/test/CodeGen/X86/known-never-zero.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64
;; Use cttz to test if we properly prove never-zero. There is a very
;; simple transform from cttz -> cttz_zero_undef if its operand is
@@ -18,41 +19,70 @@ declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i32 @llvm.fshr.i32(i32, i32, i32)
define i32 @or_known_nonzero(i32 %x) {
-; CHECK-LABEL: or_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%z = or i32 %x, 1
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @or_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: or_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl %esi, %edi
-; CHECK-NEXT: je .LBB1_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB1_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: je .LBB1_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB1_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl %esi, %edi
+; X64-NEXT: je .LBB1_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB1_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = or i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @select_known_nonzero(i1 %c, i32 %x) {
-; CHECK-LABEL: select_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %esi
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: movl $122, %eax
-; CHECK-NEXT: cmovnel %esi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: select_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $122, %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: select_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %esi
+; X64-NEXT: testb $1, %dil
+; X64-NEXT: movl $122, %eax
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%y = or i32 %x, 1
%z = select i1 %c, i32 %y, i32 122
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -60,20 +90,36 @@ define i32 @select_known_nonzero(i1 %c, i32 %x) {
}
define i32 @select_maybe_zero(i1 %c, i32 %x) {
-; CHECK-LABEL: select_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %esi
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: cmovnel %esi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB3_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB3_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: select_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl $1, %ecx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB3_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB3_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: select_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %esi
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: testb $1, %dil
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB3_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB3_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %x, 1
%z = select i1 %c, i32 %y, i32 0
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -81,28 +127,45 @@ define i32 @select_maybe_zero(i1 %c, i32 %x) {
}
define i32 @shl_known_nonzero_1s_bit_set(i32 %x) {
-; CHECK-LABEL: shl_known_nonzero_1s_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $123, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_1s_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $123, %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_1s_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $123, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = shl i32 123, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @shl_known_nonzero_nsw(i32 %x, i32 %yy) {
-; CHECK-LABEL: shl_known_nonzero_nsw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_nsw:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_nsw:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = shl nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -110,14 +173,23 @@ define i32 @shl_known_nonzero_nsw(i32 %x, i32 %yy) {
}
define i32 @shl_known_nonzero_nuw(i32 %x, i32 %yy) {
-; CHECK-LABEL: shl_known_nonzero_nuw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_known_nonzero_nuw:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_known_nonzero_nuw:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = shl nuw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -125,67 +197,116 @@ define i32 @shl_known_nonzero_nuw(i32 %x, i32 %yy) {
}
define i32 @shl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: shl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB7_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB7_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: shl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB7_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB7_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB7_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB7_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = shl nuw nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @uaddsat_known_nonzero(i32 %x) {
-; CHECK-LABEL: uaddsat_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: incl %edi
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovnel %edi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: uaddsat_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: movl $-1, %ecx
+; X86-NEXT: cmovnel %eax, %ecx
+; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uaddsat_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: incl %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovnel %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 1)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @uaddsat_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: uaddsat_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovael %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB9_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB9_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: uaddsat_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-1, %eax
+; X86-NEXT: cmovael %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB9_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB9_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: uaddsat_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: movl $-1, %eax
+; X64-NEXT: cmovael %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB9_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB9_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @umax_known_nonzero(i32 %x, i32 %y) {
-; CHECK-LABEL: umax_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: cmpl %eax, %edi
-; CHECK-NEXT: cmoval %edi, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umax_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: cmpl %edx, %eax
+; X86-NEXT: cmoval %eax, %edx
+; X86-NEXT: rep bsfl %edx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umax_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: cmpl %eax, %edi
+; X64-NEXT: cmoval %edi, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%yy = shl nuw i32 4, %y
%z = call i32 @llvm.umax.i32(i32 %x, i32 %yy)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -193,35 +314,62 @@ define i32 @umax_known_nonzero(i32 %x, i32 %y) {
}
define i32 @umax_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: umax_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl %esi, %edi
-; CHECK-NEXT: cmoval %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB11_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB11_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umax_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: cmoval %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB11_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB11_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umax_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl %esi, %edi
+; X64-NEXT: cmoval %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB11_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB11_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.umax.i32(i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @umin_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: umin_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovbl %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umin_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovbl %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umin_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovbl %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.umin.i32(i32 %x, i32 %y)
@@ -230,36 +378,63 @@ define i32 @umin_known_nonzero(i32 %xx, i32 %yy) {
}
define i32 @umin_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: umin_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $54, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovbl %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB13_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB13_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: umin_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $54, %ecx
+; X86-NEXT: movl $54, %eax
+; X86-NEXT: cmovbl %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB13_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB13_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: umin_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $54, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovbl %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB13_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB13_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.umin.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @smin_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: smin_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovll %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smin_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovll %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovll %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.smin.i32(i32 %x, i32 %y)
@@ -268,36 +443,63 @@ define i32 @smin_known_nonzero(i32 %xx, i32 %yy) {
}
define i32 @smin_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: smin_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $54, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovll %edi, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB15_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB15_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smin_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl $54, %ecx
+; X86-NEXT: movl $54, %eax
+; X86-NEXT: cmovll %ecx, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB15_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB15_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smin_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $54, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovll %edi, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB15_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB15_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.smin.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @smax_known_nonzero(i32 %xx, i32 %yy) {
-; CHECK-LABEL: smax_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $4, %eax
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: addl $4, %esi
-; CHECK-NEXT: cmpl %esi, %eax
-; CHECK-NEXT: cmovgl %eax, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smax_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $4, %edx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: addl $4, %eax
+; X86-NEXT: cmpl %eax, %edx
+; X86-NEXT: cmovgl %edx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $4, %eax
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: addl $4, %esi
+; X64-NEXT: cmpl %esi, %eax
+; X64-NEXT: cmovgl %eax, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%x = shl nuw i32 4, %xx
%y = add nuw nsw i32 %yy, 4
%z = call i32 @llvm.smax.i32(i32 %x, i32 %y)
@@ -306,35 +508,61 @@ define i32 @smax_known_nonzero(i32 %xx, i32 %yy) {
}
define i32 @smax_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: smax_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: cmpl $55, %edi
-; CHECK-NEXT: movl $54, %eax
-; CHECK-NEXT: cmovgel %edi, %eax
-; CHECK-NEXT: bsfl %eax, %ecx
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: smax_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $55, %eax
+; X86-NEXT: movl $54, %ecx
+; X86-NEXT: cmovgel %eax, %ecx
+; X86-NEXT: bsfl %ecx, %ecx
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: smax_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $55, %edi
+; X64-NEXT: movl $54, %eax
+; X64-NEXT: cmovgel %edi, %eax
+; X64-NEXT: bsfl %eax, %ecx
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: cmovnel %ecx, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.smax.i32(i32 %x, i32 54)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @rotr_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotr_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB18_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB18_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB18_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB18_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB18_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB18_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%shr = lshr i32 %x, %y
%sub = sub i32 32, %y
@@ -345,19 +573,33 @@ define i32 @rotr_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotr_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotr_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB19_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB19_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB19_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB19_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB19_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB19_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%shr = lshr i32 %x, %y
%sub = sub i32 32, %y
%shl = shl i32 %x, %sub
@@ -367,14 +609,23 @@ define i32 @rotr_maybe_zero(i32 %x, i32 %y) {
}
define i32 @rotr_with_fshr_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotr_with_fshr_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_with_fshr_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_with_fshr_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -382,39 +633,68 @@ define i32 @rotr_with_fshr_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotr_with_fshr_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotr_with_fshr_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: rorl %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB21_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB21_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotr_with_fshr_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rorl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB21_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB21_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotr_with_fshr_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: rorl %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB21_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB21_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @rotl_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotl_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB22_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB22_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB22_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB22_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB22_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB22_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%shl = shl i32 %x, %y
%sub = sub i32 32, %y
@@ -425,19 +705,33 @@ define i32 @rotl_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB23_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB23_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB23_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB23_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB23_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB23_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%shl = shl i32 %x, %y
%sub = sub i32 32, %y
%shr = lshr i32 %x, %sub
@@ -447,14 +741,23 @@ define i32 @rotl_maybe_zero(i32 %x, i32 %y) {
}
define i32 @rotl_with_fshl_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: rotl_with_fshl_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: orl $256, %edi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_with_fshl_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_with_fshl_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: orl $256, %edi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 256
%z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -462,47 +765,78 @@ define i32 @rotl_with_fshl_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @rotl_with_fshl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: rotl_with_fshl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: roll %cl, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB25_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB25_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: rotl_with_fshl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: roll %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB25_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB25_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: rotl_with_fshl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: roll %cl, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB25_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB25_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sra_known_nonzero_sign_bit_set(i32 %x) {
-; CHECK-LABEL: sra_known_nonzero_sign_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_known_nonzero_sign_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_known_nonzero_sign_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = ashr i32 2147606891, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sra_known_nonzero_exact(i32 %x, i32 %yy) {
-; CHECK-LABEL: sra_known_nonzero_exact:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_known_nonzero_exact:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_known_nonzero_exact:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = ashr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -510,47 +844,78 @@ define i32 @sra_known_nonzero_exact(i32 %x, i32 %yy) {
}
define i32 @sra_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: sra_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: sarl %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB28_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB28_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sra_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB28_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB28_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sra_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: sarl %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB28_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB28_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = ashr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @srl_known_nonzero_sign_bit_set(i32 %x) {
-; CHECK-LABEL: srl_known_nonzero_sign_bit_set:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_known_nonzero_sign_bit_set:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_known_nonzero_sign_bit_set:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%z = lshr i32 2147606891, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @srl_known_nonzero_exact(i32 %x, i32 %yy) {
-; CHECK-LABEL: srl_known_nonzero_exact:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_known_nonzero_exact:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_known_nonzero_exact:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %esi
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = lshr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -558,33 +923,56 @@ define i32 @srl_known_nonzero_exact(i32 %x, i32 %yy) {
}
define i32 @srl_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: srl_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shrl %cl, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB31_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB31_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: srl_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB31_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB31_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: srl_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shrl %cl, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB31_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB31_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = lshr exact i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @udiv_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: udiv_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %esi
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: udiv_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl {{[0-9]+}}(%esp)
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: udiv_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %esi
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%z = udiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -592,33 +980,56 @@ define i32 @udiv_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @udiv_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: udiv_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %esi
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB33_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB33_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: udiv_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: divl {{[0-9]+}}(%esp)
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB33_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB33_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: udiv_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: divl %esi
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB33_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB33_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = udiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sdiv_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: sdiv_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %esi
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sdiv_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: cltd
+; X86-NEXT: idivl {{[0-9]+}}(%esp)
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sdiv_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: cltd
+; X64-NEXT: idivl %esi
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%z = sdiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -626,31 +1037,53 @@ define i32 @sdiv_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @sdiv_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: sdiv_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %esi
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB35_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB35_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sdiv_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cltd
+; X86-NEXT: idivl {{[0-9]+}}(%esp)
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB35_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB35_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sdiv_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: cltd
+; X64-NEXT: idivl %esi
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB35_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB35_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sdiv exact i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @add_known_nonzero(i32 %xx, i32 %y) {
-; CHECK-LABEL: add_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: add_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: add_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 1
%z = add nuw i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -658,17 +1091,30 @@ define i32 @add_known_nonzero(i32 %xx, i32 %y) {
}
define i32 @add_maybe_zero(i32 %xx, i32 %y) {
-; CHECK-LABEL: add_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $1, %edi
-; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: je .LBB37_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB37_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: add_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: je .LBB37_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB37_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: add_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: orl $1, %edi
+; X64-NEXT: addl %esi, %edi
+; X64-NEXT: je .LBB37_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB37_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 1
%z = add nsw i32 %x, %y
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -676,15 +1122,24 @@ define i32 @add_maybe_zero(i32 %xx, i32 %y) {
}
define i32 @sub_known_nonzero_neg_case(i32 %xx) {
-; CHECK-LABEL: sub_known_nonzero_neg_case:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_known_nonzero_neg_case:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_known_nonzero_neg_case:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: negl %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i32 256, %xx
%z = sub i32 0, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -692,14 +1147,24 @@ define i32 @sub_known_nonzero_neg_case(i32 %xx) {
}
define i32 @sub_known_nonzero_ne_case(i32 %xx, i32 %yy) {
-; CHECK-LABEL: sub_known_nonzero_ne_case:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: andl $-65, %edi
-; CHECK-NEXT: subl %eax, %edi
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_known_nonzero_ne_case:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: orl $64, %ecx
+; X86-NEXT: andl $-65, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_known_nonzero_ne_case:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: andl $-65, %edi
+; X64-NEXT: subl %eax, %edi
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
%x = or i32 %xx, 64
%y = and i32 %xx, -65
%z = sub i32 %y, %x
@@ -708,18 +1173,32 @@ define i32 @sub_known_nonzero_ne_case(i32 %xx, i32 %yy) {
}
define i32 @sub_maybe_zero(i32 %x) {
-; CHECK-LABEL: sub_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: orl $64, %eax
-; CHECK-NEXT: subl %edi, %eax
-; CHECK-NEXT: je .LBB40_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB40_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: orl $64, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: je .LBB40_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB40_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: orl $64, %eax
+; X64-NEXT: subl %edi, %eax
+; X64-NEXT: je .LBB40_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB40_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %x, 64
%z = sub i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -727,34 +1206,60 @@ define i32 @sub_maybe_zero(i32 %x) {
}
define i32 @sub_maybe_zero2(i32 %x) {
-; CHECK-LABEL: sub_maybe_zero2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: negl %edi
-; CHECK-NEXT: je .LBB41_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB41_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sub_maybe_zero2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: je .LBB41_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB41_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sub_maybe_zero2:
+; X64: # %bb.0:
+; X64-NEXT: negl %edi
+; X64-NEXT: je .LBB41_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB41_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sub i32 0, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @mul_known_nonzero_nsw(i32 %x, i32 %yy) {
-; CHECK-LABEL: mul_known_nonzero_nsw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: imull %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB42_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB42_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_known_nonzero_nsw:
+; X86: # %bb.0:
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB42_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB42_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_known_nonzero_nsw:
+; X64: # %bb.0:
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: imull %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB42_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB42_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = mul nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -762,18 +1267,32 @@ define i32 @mul_known_nonzero_nsw(i32 %x, i32 %yy) {
}
define i32 @mul_known_nonzero_nuw(i32 %x, i32 %yy) {
-; CHECK-LABEL: mul_known_nonzero_nuw:
-; CHECK: # %bb.0:
-; CHECK-NEXT: orl $256, %esi # imm = 0x100
-; CHECK-NEXT: imull %edi, %esi
-; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je .LBB43_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %esi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB43_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_known_nonzero_nuw:
+; X86: # %bb.0:
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB43_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB43_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_known_nonzero_nuw:
+; X64: # %bb.0:
+; X64-NEXT: orl $256, %esi # imm = 0x100
+; X64-NEXT: imull %edi, %esi
+; X64-NEXT: testl %esi, %esi
+; X64-NEXT: je .LBB43_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %esi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB43_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%y = or i32 %yy, 256
%z = mul nuw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -781,36 +1300,63 @@ define i32 @mul_known_nonzero_nuw(i32 %x, i32 %yy) {
}
define i32 @mul_maybe_zero(i32 %x, i32 %y) {
-; CHECK-LABEL: mul_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: imull %esi, %edi
-; CHECK-NEXT: testl %edi, %edi
-; CHECK-NEXT: je .LBB44_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %edi, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB44_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: mul_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB44_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB44_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: imull %esi, %edi
+; X64-NEXT: testl %edi, %edi
+; X64-NEXT: je .LBB44_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %edi, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB44_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = mul nuw nsw i32 %y, %x
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @bitcast_known_nonzero(<2 x i16> %xx) {
-; CHECK-LABEL: bitcast_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; CHECK-NEXT: pslld $23, %xmm0
-; CHECK-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: cvttps2dq %xmm0, %xmm0
-; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; CHECK-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: bsfl %eax, %ecx
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: cmovnel %ecx, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; X86-NEXT: pslld $23, %xmm0
+; X86-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: cvttps2dq %xmm0, %xmm0
+; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; X86-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: bsfl %eax, %ecx
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: cmovnel %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-NEXT: vpslld $23, %xmm0, %xmm0
+; X64-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; X64-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: bsfl %eax, %ecx
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: cmovnel %ecx, %eax
+; X64-NEXT: retq
%x = shl nuw nsw <2 x i16> <i16 256, i16 256>, %xx
%z = bitcast <2 x i16> %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -818,49 +1364,83 @@ define i32 @bitcast_known_nonzero(<2 x i16> %xx) {
}
define i32 @bitcast_maybe_zero(<2 x i16> %x) {
-; CHECK-LABEL: bitcast_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB46_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB46_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB46_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB46_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB46_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB46_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = bitcast <2 x i16> %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @bitcast_from_float(float %x) {
-; CHECK-LABEL: bitcast_from_float:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: je .LBB47_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB47_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: bitcast_from_float:
+; X86: # %bb.0:
+; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB47_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB47_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bitcast_from_float:
+; X64: # %bb.0:
+; X64-NEXT: vmovd %xmm0, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: je .LBB47_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB47_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = bitcast float %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @zext_known_nonzero(i16 %xx) {
-; CHECK-LABEL: zext_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: zext_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: zext_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i16 256, %xx
%z = zext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -868,32 +1448,54 @@ define i32 @zext_known_nonzero(i16 %xx) {
}
define i32 @zext_maybe_zero(i16 %x) {
-; CHECK-LABEL: zext_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: testw %di, %di
-; CHECK-NEXT: je .LBB49_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: movzwl %di, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB49_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: zext_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testw %ax, %ax
+; X86-NEXT: je .LBB49_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB49_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: zext_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: testw %di, %di
+; X64-NEXT: je .LBB49_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB49_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = zext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r
}
define i32 @sext_known_nonzero(i16 %xx) {
-; CHECK-LABEL: sext_known_nonzero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: movl $256, %eax # imm = 0x100
-; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT: shll %cl, %eax
-; CHECK-NEXT: cwtl
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sext_known_nonzero:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $256, %eax # imm = 0x100
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: cwtl
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sext_known_nonzero:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %ecx
+; X64-NEXT: movl $256, %eax # imm = 0x100
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: cwtl
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
%x = shl nuw nsw i16 256, %xx
%z = sext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
@@ -901,17 +1503,29 @@ define i32 @sext_known_nonzero(i16 %xx) {
}
define i32 @sext_maybe_zero(i16 %x) {
-; CHECK-LABEL: sext_maybe_zero:
-; CHECK: # %bb.0:
-; CHECK-NEXT: testw %di, %di
-; CHECK-NEXT: je .LBB51_1
-; CHECK-NEXT: # %bb.2: # %cond.false
-; CHECK-NEXT: movswl %di, %eax
-; CHECK-NEXT: rep bsfl %eax, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB51_1:
-; CHECK-NEXT: movl $32, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: sext_maybe_zero:
+; X86: # %bb.0:
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: je .LBB51_1
+; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: rep bsfl %eax, %eax
+; X86-NEXT: retl
+; X86-NEXT: .LBB51_1:
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: sext_maybe_zero:
+; X64: # %bb.0:
+; X64-NEXT: testw %di, %di
+; X64-NEXT: je .LBB51_1
+; X64-NEXT: # %bb.2: # %cond.false
+; X64-NEXT: movswl %di, %eax
+; X64-NEXT: rep bsfl %eax, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB51_1:
+; X64-NEXT: movl $32, %eax
+; X64-NEXT: retq
%z = sext i16 %x to i32
%r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
ret i32 %r