[llvm] a342f98 - [X86] dagcombine-shifts.ll - add i686 test coverage
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 17 05:35:26 PDT 2023
Author: Simon Pilgrim
Date: 2023-08-17T13:35:05+01:00
New Revision: a342f9802b4115f0e80c42d24765bfbc316d2eea
URL: https://github.com/llvm/llvm-project/commit/a342f9802b4115f0e80c42d24765bfbc316d2eea
DIFF: https://github.com/llvm/llvm-project/commit/a342f9802b4115f0e80c42d24765bfbc316d2eea.diff
LOG: [X86] dagcombine-shifts.ll - add i686 test coverage
Added:
Modified:
llvm/test/CodeGen/X86/dagcombine-shifts.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index 76fc763b24a515..6bfda6827d520a 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64
; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X))
@@ -11,12 +12,19 @@
; and if there is only one use of the zext.
define i16 @fun1(i8 zeroext %v) {
-; CHECK-LABEL: fun1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: andl $-16, %eax
-; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
-; CHECK-NEXT: retq
+; X86-LABEL: fun1:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $-16, %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: fun1:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $-16, %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NEXT: retq
entry:
%shr = lshr i8 %v, 4
%ext = zext i8 %shr to i16
@@ -25,11 +33,17 @@ entry:
}
define i32 @fun2(i8 zeroext %v) {
-; CHECK-LABEL: fun2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: andl $-16, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: fun2:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $-16, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: fun2:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $-16, %eax
+; X64-NEXT: retq
entry:
%shr = lshr i8 %v, 4
%ext = zext i8 %shr to i32
@@ -38,11 +52,17 @@ entry:
}
define i32 @fun3(i16 zeroext %v) {
-; CHECK-LABEL: fun3:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: andl $-16, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: fun3:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $-16, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: fun3:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $-16, %eax
+; X64-NEXT: retq
entry:
%shr = lshr i16 %v, 4
%ext = zext i16 %shr to i32
@@ -51,11 +71,18 @@ entry:
}
define i64 @fun4(i8 zeroext %v) {
-; CHECK-LABEL: fun4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: andl $-16, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: fun4:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $-16, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: fun4:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $-16, %eax
+; X64-NEXT: retq
entry:
%shr = lshr i8 %v, 4
%ext = zext i8 %shr to i64
@@ -64,11 +91,18 @@ entry:
}
define i64 @fun5(i16 zeroext %v) {
-; CHECK-LABEL: fun5:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: andl $-16, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: fun5:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $-16, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: fun5:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $-16, %eax
+; X64-NEXT: retq
entry:
%shr = lshr i16 %v, 4
%ext = zext i16 %shr to i64
@@ -77,11 +111,18 @@ entry:
}
define i64 @fun6(i32 zeroext %v) {
-; CHECK-LABEL: fun6:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: andl $-16, %eax
-; CHECK-NEXT: retq
+; X86-LABEL: fun6:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $-16, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: fun6:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $-16, %eax
+; X64-NEXT: retq
entry:
%shr = lshr i32 %v, 4
%ext = zext i32 %shr to i64
@@ -92,12 +133,21 @@ entry:
; Don't fold the pattern if we use arithmetic shifts.
define i64 @fun7(i8 zeroext %v) {
-; CHECK-LABEL: fun7:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: sarb $4, %dil
-; CHECK-NEXT: movzbl %dil, %eax
-; CHECK-NEXT: shlq $4, %rax
-; CHECK-NEXT: retq
+; X86-LABEL: fun7:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarb $4, %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: shll $4, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: fun7:
+; X64: # %bb.0: # %entry
+; X64-NEXT: sarb $4, %dil
+; X64-NEXT: movzbl %dil, %eax
+; X64-NEXT: shlq $4, %rax
+; X64-NEXT: retq
entry:
%shr = ashr i8 %v, 4
%ext = zext i8 %shr to i64
@@ -106,13 +156,20 @@ entry:
}
define i64 @fun8(i16 zeroext %v) {
-; CHECK-LABEL: fun8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movswl %di, %eax
-; CHECK-NEXT: shrl $4, %eax
-; CHECK-NEXT: movzwl %ax, %eax
-; CHECK-NEXT: shlq $4, %rax
-; CHECK-NEXT: retq
+; X86-LABEL: fun8:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $1048560, %eax # imm = 0xFFFF0
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: fun8:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movswl %di, %eax
+; X64-NEXT: shrl $4, %eax
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: shlq $4, %rax
+; X64-NEXT: retq
entry:
%shr = ashr i16 %v, 4
%ext = zext i16 %shr to i64
@@ -121,12 +178,21 @@ entry:
}
define i64 @fun9(i32 zeroext %v) {
-; CHECK-LABEL: fun9:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: sarl $4, %eax
-; CHECK-NEXT: shlq $4, %rax
-; CHECK-NEXT: retq
+; X86-LABEL: fun9:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: sarl $4, %edx
+; X86-NEXT: andl $-16, %eax
+; X86-NEXT: shrl $28, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: fun9:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: sarl $4, %eax
+; X64-NEXT: shlq $4, %rax
+; X64-NEXT: retq
entry:
%shr = ashr i32 %v, 4
%ext = zext i32 %shr to i64
@@ -138,14 +204,25 @@ entry:
; operand in input to the shift left.
define i64 @fun10(i8 zeroext %v) {
-; CHECK-LABEL: fun10:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: shrb $4, %dil
-; CHECK-NEXT: movzbl %dil, %ecx
-; CHECK-NEXT: movq %rcx, %rax
-; CHECK-NEXT: shlq $4, %rax
-; CHECK-NEXT: orq %rcx, %rax
-; CHECK-NEXT: retq
+; X86-LABEL: fun10:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrb $4, %al
+; X86-NEXT: movzbl %al, %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shll $4, %eax
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: fun10:
+; X64: # %bb.0: # %entry
+; X64-NEXT: shrb $4, %dil
+; X64-NEXT: movzbl %dil, %ecx
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: shlq $4, %rax
+; X64-NEXT: orq %rcx, %rax
+; X64-NEXT: retq
entry:
%shr = lshr i8 %v, 4
%ext = zext i8 %shr to i64
@@ -155,14 +232,24 @@ entry:
}
define i64 @fun11(i16 zeroext %v) {
-; CHECK-LABEL: fun11:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: shrl $4, %edi
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: shlq $4, %rax
-; CHECK-NEXT: addq %rdi, %rax
-; CHECK-NEXT: retq
+; X86-LABEL: fun11:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shrl $4, %ecx
+; X86-NEXT: andl $-16, %eax
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: retl
+;
+; X64-LABEL: fun11:
+; X64: # %bb.0: # %entry
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: shrl $4, %edi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shlq $4, %rax
+; X64-NEXT: addq %rdi, %rax
+; X64-NEXT: retq
entry:
%shr = lshr i16 %v, 4
%ext = zext i16 %shr to i64
@@ -172,14 +259,25 @@ entry:
}
define i64 @fun12(i32 zeroext %v) {
-; CHECK-LABEL: fun12:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: shrl $4, %edi
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: shlq $4, %rax
-; CHECK-NEXT: addq %rdi, %rax
-; CHECK-NEXT: retq
+; X86-LABEL: fun12:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shrl $4, %ecx
+; X86-NEXT: andl $-16, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: addl %ecx, %eax
+; X86-NEXT: setb %dl
+; X86-NEXT: retl
+;
+; X64-LABEL: fun12:
+; X64: # %bb.0: # %entry
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: shrl $4, %edi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: shlq $4, %rax
+; X64-NEXT: addq %rdi, %rax
+; X64-NEXT: retq
entry:
%shr = lshr i32 %v, 4
%ext = zext i32 %shr to i64
@@ -199,12 +297,24 @@ entry:
; Verify also that we correctly fold the shl-shr sequence into an
; AND with bitmask.
-define void @g(i32 %a) {
-; CHECK-LABEL: g:
-; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: andl $-4, %edi
-; CHECK-NEXT: jmp f # TAILCALL
+define void @g(i32 %a) nounwind {
+; X86-LABEL: g:
+; X86: # %bb.0:
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $-4, %eax
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll f
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: g:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: andl $-4, %edi
+; X64-NEXT: jmp f # TAILCALL
%b = lshr i32 %a, 2
%c = zext i32 %b to i64
%d = add i64 %c, 1
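
For readers unfamiliar with the fold these tests exercise, here is a
minimal standalone sketch (not part of the commit; the function name is
illustrative, mirroring fun1 above):

; Pattern from the file header:
;   (shl (zext (lshr A, X)), X) -> (zext (shl (lshr A, X), X))
; Per the comment above, the fold is only done if there is a single use
; of the zext; the lshr+shl pair by the same amount then reduces to a
; mask that clears the low X bits.
define i16 @shl_zext_lshr(i8 zeroext %v) {
entry:
  %shr = lshr i8 %v, 4        ; drop the low 4 bits
  %ext = zext i8 %shr to i16  ; widen before shifting back
  %shl = shl i16 %ext, 4      ; shift left by the same amount
  ret i16 %shl                ; equivalent to (zext %v) & -16
}

Both RUN lines lower this to a single 'andl $-16' (see the fun1 CHECK
lines). The new i686 coverage differs mainly in loading the argument
from the stack rather than from %edi, and in materializing i64 results
in the %edx:%eax pair (hence the 'xorl %edx, %edx' in the X86 checks).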