[llvm] bce3da9 - [X86] Add 32-bit test coverage to or-lea.ll
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 8 05:36:21 PDT 2022
Author: Simon Pilgrim
Date: 2022-08-08T13:31:06+01:00
New Revision: bce3da9b8996541dbfc42c27387ff0f34cbb5353
URL: https://github.com/llvm/llvm-project/commit/bce3da9b8996541dbfc42c27387ff0f34cbb5353
DIFF: https://github.com/llvm/llvm-project/commit/bce3da9b8996541dbfc42c27387ff0f34cbb5353.diff
LOG: [X86] Add 32-bit test coverage to or-lea.ll
Noticed while triaging D131358
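
For context, or-lea.ll exercises patterns where InstCombine/DAGCombiner rewrite an 'add' of disjoint-bit operands into an 'or', and LEA selection is expected to fold the shift/or back into a single lea. The CHECK lines below are autogenerated by utils/update_llc_test_checks.py (per the NOTE in the test header). A minimal sketch of reproducing the new 32-bit output locally, assuming an in-tree llc build is on PATH (the exact invocation is illustrative, not part of this commit):

    llc < llvm/test/CodeGen/X86/or-lea.ll -mtriple=i686-unknown-unknown
    # regenerate all CHECK lines after adding the new RUN line:
    llvm/utils/update_llc_test_checks.py --llc-binary=$(which llc) \
        llvm/test/CodeGen/X86/or-lea.ll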
Added:
Modified:
llvm/test/CodeGen/X86/or-lea.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/or-lea.ll b/llvm/test/CodeGen/X86/or-lea.ll
index 8cc1a45f3cd2..93cb5faf09c9 100644
--- a/llvm/test/CodeGen/X86/or-lea.ll
+++ b/llvm/test/CodeGen/X86/or-lea.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,NOBMI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi | FileCheck %s --check-prefixes=CHECK,BMI
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64,NOBMI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi | FileCheck %s --check-prefixes=X64,BMI
; InstCombine and DAGCombiner transform an 'add' into an 'or'
; if there are no common bits from the incoming operands.
@@ -8,14 +9,21 @@
; LEA instruction selection should be able to see through that
; transform and reduce add/shift/or instruction counts.
define i32 @or_shift1_and1(i32 %x, i32 %y) {
-; CHECK-LABEL: or_shift1_and1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
-; CHECK-NEXT: retq
-
+; X86-LABEL: or_shift1_and1:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $1, %ecx
+; X86-NEXT: leal (%ecx,%eax,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_shift1_and1:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: andl $1, %esi
+; X64-NEXT: leal (%rsi,%rdi,2), %eax
+; X64-NEXT: retq
%shl = shl i32 %x, 1
%and = and i32 %y, 1
%or = or i32 %and, %shl
@@ -23,14 +31,21 @@ define i32 @or_shift1_and1(i32 %x, i32 %y) {
}
define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
-; CHECK-LABEL: or_shift1_and1_swapped:
-; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
-; CHECK-NEXT: retq
-
+; X86-LABEL: or_shift1_and1_swapped:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $1, %ecx
+; X86-NEXT: leal (%ecx,%eax,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_shift1_and1_swapped:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: andl $1, %esi
+; X64-NEXT: leal (%rsi,%rdi,2), %eax
+; X64-NEXT: retq
%shl = shl i32 %x, 1
%and = and i32 %y, 1
%or = or i32 %shl, %and
@@ -38,14 +53,21 @@ define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
}
define i32 @or_shift2_and1(i32 %x, i32 %y) {
-; CHECK-LABEL: or_shift2_and1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: leal (%rsi,%rdi,4), %eax
-; CHECK-NEXT: retq
-
+; X86-LABEL: or_shift2_and1:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $1, %ecx
+; X86-NEXT: leal (%ecx,%eax,4), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_shift2_and1:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: andl $1, %esi
+; X64-NEXT: leal (%rsi,%rdi,4), %eax
+; X64-NEXT: retq
%shl = shl i32 %x, 2
%and = and i32 %y, 1
%or = or i32 %shl, %and
@@ -53,14 +75,21 @@ define i32 @or_shift2_and1(i32 %x, i32 %y) {
}
define i32 @or_shift3_and1(i32 %x, i32 %y) {
-; CHECK-LABEL: or_shift3_and1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
-; CHECK-NEXT: retq
-
+; X86-LABEL: or_shift3_and1:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $1, %ecx
+; X86-NEXT: leal (%ecx,%eax,8), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_shift3_and1:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: andl $1, %esi
+; X64-NEXT: leal (%rsi,%rdi,8), %eax
+; X64-NEXT: retq
%shl = shl i32 %x, 3
%and = and i32 %y, 1
%or = or i32 %shl, %and
@@ -68,14 +97,21 @@ define i32 @or_shift3_and1(i32 %x, i32 %y) {
}
define i32 @or_shift3_and7(i32 %x, i32 %y) {
-; CHECK-LABEL: or_shift3_and7:
-; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: andl $7, %esi
-; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
-; CHECK-NEXT: retq
-
+; X86-LABEL: or_shift3_and7:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $7, %ecx
+; X86-NEXT: leal (%ecx,%eax,8), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_shift3_and7:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: andl $7, %esi
+; X64-NEXT: leal (%rsi,%rdi,8), %eax
+; X64-NEXT: retq
%shl = shl i32 %x, 3
%and = and i32 %y, 7
%or = or i32 %shl, %and
@@ -85,15 +121,23 @@ define i32 @or_shift3_and7(i32 %x, i32 %y) {
; The shift is too big for an LEA.
define i32 @or_shift4_and1(i32 %x, i32 %y) {
-; CHECK-LABEL: or_shift4_and1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: shll $4, %edi
-; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: leal (%rsi,%rdi), %eax
-; CHECK-NEXT: retq
-
+; X86-LABEL: or_shift4_and1:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll $4, %ecx
+; X86-NEXT: andl $1, %eax
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_shift4_and1:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: shll $4, %edi
+; X64-NEXT: andl $1, %esi
+; X64-NEXT: leal (%rsi,%rdi), %eax
+; X64-NEXT: retq
%shl = shl i32 %x, 4
%and = and i32 %y, 1
%or = or i32 %shl, %and
@@ -103,14 +147,22 @@ define i32 @or_shift4_and1(i32 %x, i32 %y) {
; The mask is too big for the shift, so the 'or' isn't equivalent to an 'add'.
define i32 @or_shift3_and8(i32 %x, i32 %y) {
-; CHECK-LABEL: or_shift3_and8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: leal (,%rdi,8), %eax
-; CHECK-NEXT: andl $8, %esi
-; CHECK-NEXT: orl %esi, %eax
-; CHECK-NEXT: retq
-
+; X86-LABEL: or_shift3_and8:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll $3, %ecx
+; X86-NEXT: andl $8, %eax
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_shift3_and8:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal (,%rdi,8), %eax
+; X64-NEXT: andl $8, %esi
+; X64-NEXT: orl %esi, %eax
+; X64-NEXT: retq
%shl = shl i32 %x, 3
%and = and i32 %y, 8
%or = or i32 %shl, %and
@@ -120,12 +172,21 @@ define i32 @or_shift3_and8(i32 %x, i32 %y) {
; 64-bit operands should work too.
define i64 @or_shift1_and1_64(i64 %x, i64 %y) {
-; CHECK-LABEL: or_shift1_and1_64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: andl $1, %esi
-; CHECK-NEXT: leaq (%rsi,%rdi,2), %rax
-; CHECK-NEXT: retq
-
+; X86-LABEL: or_shift1_and1_64:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $1, %ecx, %edx
+; X86-NEXT: andl $1, %eax
+; X86-NEXT: leal (%eax,%ecx,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_shift1_and1_64:
+; X64: # %bb.0:
+; X64-NEXT: andl $1, %esi
+; X64-NEXT: leaq (%rsi,%rdi,2), %rax
+; X64-NEXT: retq
%shl = shl i64 %x, 1
%and = and i64 %y, 1
%or = or i64 %and, %shl
@@ -135,6 +196,16 @@ define i64 @or_shift1_and1_64(i64 %x, i64 %y) {
; In the following patterns, lhs and rhs of the or instruction have no common bits.
define i32 @or_and_and_rhs_neg_i32(i32 %x, i32 %y, i32 %z) {
+; X86-LABEL: or_and_and_rhs_neg_i32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: retl
+;
; NOBMI-LABEL: or_and_and_rhs_neg_i32:
; NOBMI: # %bb.0: # %entry
; NOBMI-NEXT: # kill: def $edx killed $edx def $rdx
@@ -161,6 +232,16 @@ entry:
}
define i32 @or_and_and_lhs_neg_i32(i32 %x, i32 %y, i32 %z) {
+; X86-LABEL: or_and_and_lhs_neg_i32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: retl
+;
; NOBMI-LABEL: or_and_and_lhs_neg_i32:
; NOBMI: # %bb.0: # %entry
; NOBMI-NEXT: # kill: def $edx killed $edx def $rdx
@@ -187,6 +268,16 @@ entry:
}
define i32 @or_and_rhs_neg_and_i32(i32 %x, i32 %y, i32 %z) {
+; X86-LABEL: or_and_rhs_neg_and_i32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: retl
+;
; NOBMI-LABEL: or_and_rhs_neg_and_i32:
; NOBMI: # %bb.0: # %entry
; NOBMI-NEXT: # kill: def $edi killed $edi def $rdi
@@ -213,6 +304,16 @@ entry:
}
define i32 @or_and_lhs_neg_and_i32(i32 %x, i32 %y, i32 %z) {
+; X86-LABEL: or_and_lhs_neg_and_i32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: retl
+;
; NOBMI-LABEL: or_and_lhs_neg_and_i32:
; NOBMI: # %bb.0: # %entry
; NOBMI-NEXT: # kill: def $edi killed $edi def $rdi
@@ -239,6 +340,22 @@ entry:
}
define i64 @or_and_and_rhs_neg_i64(i64 %x, i64 %y, i64 %z) {
+; X86-LABEL: or_and_and_rhs_neg_i64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: addl $1, %eax
+; X86-NEXT: adcl $0, %edx
+; X86-NEXT: retl
+;
; NOBMI-LABEL: or_and_and_rhs_neg_i64:
; NOBMI: # %bb.0: # %entry
; NOBMI-NEXT: xorq %rdi, %rdx
@@ -263,6 +380,22 @@ entry:
}
define i64 @or_and_and_lhs_neg_i64(i64 %x, i64 %y, i64 %z) {
+; X86-LABEL: or_and_and_lhs_neg_i64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: addl $1, %eax
+; X86-NEXT: adcl $0, %edx
+; X86-NEXT: retl
+;
; NOBMI-LABEL: or_and_and_lhs_neg_i64:
; NOBMI: # %bb.0: # %entry
; NOBMI-NEXT: xorq %rdi, %rdx
@@ -287,6 +420,22 @@ entry:
}
define i64 @or_and_rhs_neg_and_i64(i64 %x, i64 %y, i64 %z) {
+; X86-LABEL: or_and_rhs_neg_and_i64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: addl $1, %eax
+; X86-NEXT: adcl $0, %edx
+; X86-NEXT: retl
+;
; NOBMI-LABEL: or_and_rhs_neg_and_i64:
; NOBMI: # %bb.0: # %entry
; NOBMI-NEXT: xorq %rdx, %rdi
@@ -311,6 +460,22 @@ entry:
}
define i64 @or_and_lhs_neg_and_i64(i64 %x, i64 %y, i64 %z) {
+; X86-LABEL: or_and_lhs_neg_and_i64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: andl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: addl $1, %eax
+; X86-NEXT: adcl $0, %edx
+; X86-NEXT: retl
+;
; NOBMI-LABEL: or_and_lhs_neg_and_i64:
; NOBMI: # %bb.0: # %entry
; NOBMI-NEXT: xorq %rdx, %rdi
@@ -335,14 +500,23 @@ entry:
}
define i32 @or_sext1(i32 %x) {
-; CHECK-LABEL: or_sext1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: cmpl $43, %edi
-; CHECK-NEXT: setge %al
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: orl $1, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_sext1:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl $43, {{[0-9]+}}(%esp)
+; X86-NEXT: setge %al
+; X86-NEXT: negl %eax
+; X86-NEXT: orl $1, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_sext1:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl $43, %edi
+; X64-NEXT: setge %al
+; X64-NEXT: negl %eax
+; X64-NEXT: orl $1, %eax
+; X64-NEXT: retq
%cmp = icmp sgt i32 %x, 42
%sext = sext i1 %cmp to i32
%or = or i32 %sext, 1
@@ -350,14 +524,23 @@ define i32 @or_sext1(i32 %x) {
}
define i32 @or_sext2(i32 %x) {
-; CHECK-LABEL: or_sext2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: cmpl $43, %edi
-; CHECK-NEXT: setge %al
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: orl $2, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_sext2:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl $43, {{[0-9]+}}(%esp)
+; X86-NEXT: setge %al
+; X86-NEXT: negl %eax
+; X86-NEXT: orl $2, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_sext2:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl $43, %edi
+; X64-NEXT: setge %al
+; X64-NEXT: negl %eax
+; X64-NEXT: orl $2, %eax
+; X64-NEXT: retq
%cmp = icmp sgt i32 %x, 42
%sext = sext i1 %cmp to i32
%or = or i32 %sext, 2
@@ -365,14 +548,23 @@ define i32 @or_sext2(i32 %x) {
}
define i32 @or_sext3(i32 %x) {
-; CHECK-LABEL: or_sext3:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: cmpl $43, %edi
-; CHECK-NEXT: setge %al
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: orl $3, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_sext3:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl $43, {{[0-9]+}}(%esp)
+; X86-NEXT: setge %al
+; X86-NEXT: negl %eax
+; X86-NEXT: orl $3, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_sext3:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl $43, %edi
+; X64-NEXT: setge %al
+; X64-NEXT: negl %eax
+; X64-NEXT: orl $3, %eax
+; X64-NEXT: retq
%cmp = icmp sgt i32 %x, 42
%sext = sext i1 %cmp to i32
%or = or i32 %sext, 3
@@ -380,14 +572,23 @@ define i32 @or_sext3(i32 %x) {
}
define i32 @or_sext4(i32 %x) {
-; CHECK-LABEL: or_sext4:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: cmpl $43, %edi
-; CHECK-NEXT: setge %al
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: orl $4, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_sext4:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl $43, {{[0-9]+}}(%esp)
+; X86-NEXT: setge %al
+; X86-NEXT: negl %eax
+; X86-NEXT: orl $4, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_sext4:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl $43, %edi
+; X64-NEXT: setge %al
+; X64-NEXT: negl %eax
+; X64-NEXT: orl $4, %eax
+; X64-NEXT: retq
%cmp = icmp sgt i32 %x, 42
%sext = sext i1 %cmp to i32
%or = or i32 %sext, 4
@@ -395,14 +596,23 @@ define i32 @or_sext4(i32 %x) {
}
define i32 @or_sext7(i32 %x) {
-; CHECK-LABEL: or_sext7:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: cmpl $43, %edi
-; CHECK-NEXT: setge %al
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: orl $7, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_sext7:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl $43, {{[0-9]+}}(%esp)
+; X86-NEXT: setge %al
+; X86-NEXT: negl %eax
+; X86-NEXT: orl $7, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_sext7:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl $43, %edi
+; X64-NEXT: setge %al
+; X64-NEXT: negl %eax
+; X64-NEXT: orl $7, %eax
+; X64-NEXT: retq
%cmp = icmp sgt i32 %x, 42
%sext = sext i1 %cmp to i32
%or = or i32 %sext, 7
@@ -410,14 +620,23 @@ define i32 @or_sext7(i32 %x) {
}
define i32 @or_sext8(i32 %x) {
-; CHECK-LABEL: or_sext8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: cmpl $43, %edi
-; CHECK-NEXT: setge %al
-; CHECK-NEXT: negl %eax
-; CHECK-NEXT: orl $8, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: or_sext8:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl $43, {{[0-9]+}}(%esp)
+; X86-NEXT: setge %al
+; X86-NEXT: negl %eax
+; X86-NEXT: orl $8, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: or_sext8:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl $43, %edi
+; X64-NEXT: setge %al
+; X64-NEXT: negl %eax
+; X64-NEXT: orl $8, %eax
+; X64-NEXT: retq
%cmp = icmp sgt i32 %x, 42
%sext = sext i1 %cmp to i32
%or = or i32 %sext, 8