[llvm] r345640 - [x86] try to make test immune to better div optimization; NFCI

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 30 13:44:54 PDT 2018


Author: spatel
Date: Tue Oct 30 13:44:54 2018
New Revision: 345640

URL: http://llvm.org/viewvc/llvm-project?rev=345640&view=rev
Log:
[x86] try to make test immune to better div optimization; NFCI
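
Background, as a minimal sketch (the two functions below are illustrative
only, not taken from the test): with a constant-zero numerator, a smarter
div optimization can fold the srem to 0 and delete the idivl this test
relies on, while a variable numerator keeps the division alive.

  ; 0 srem X simplifies to 0, so the srem (and the EFLAGS-clobbering idivl
  ; it lowers to) can be optimized away entirely:
  define i32 @zero_numerator(i32 %d) {
    %r = srem i32 0, %d   ; foldable to 0
    ret i32 %r
  }

  ; With a variable numerator the srem must stay, keeping the idivl that
  ; the test needs in order to exercise the copied-EFLAGS lowering:
  define i32 @variable_numerator(i32 %x, i32 %d) {
    %r = srem i32 %x, %d  ; not foldable
    ret i32 %r
  }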

Modified:
    llvm/trunk/test/CodeGen/X86/copy-eflags.ll

Modified: llvm/trunk/test/CodeGen/X86/copy-eflags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/copy-eflags.ll?rev=345640&r1=345639&r2=345640&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/copy-eflags.ll (original)
+++ llvm/trunk/test/CodeGen/X86/copy-eflags.ll Tue Oct 30 13:44:54 2018
@@ -308,47 +308,46 @@ bb1:
 ; Use a particular instruction pattern that lowers to the post-RA pseudo
 ; used to turn SETB into an SBB pattern, to make sure that this kind of
 ; use of a copied EFLAGS continues to work.
-define void @PR37431(i32* %arg1, i8* %arg2, i8* %arg3) {
+define void @PR37431(i32* %arg1, i8* %arg2, i8* %arg3, i32 %x) nounwind {
 ; X32-LABEL: PR37431:
 ; X32:       # %bb.0: # %entry
+; X32-NEXT:    pushl %edi
 ; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %esi, -8
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl (%eax), %eax
 ; X32-NEXT:    movl %eax, %ecx
 ; X32-NEXT:    sarl $31, %ecx
 ; X32-NEXT:    cmpl %eax, %eax
 ; X32-NEXT:    sbbl %ecx, %eax
-; X32-NEXT:    setb %al
-; X32-NEXT:    sbbb %cl, %cl
+; X32-NEXT:    setb %cl
+; X32-NEXT:    sbbb %dl, %dl
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movb %cl, (%edx)
-; X32-NEXT:    movzbl %al, %eax
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    subl %eax, %ecx
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    idivl %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    movb %dl, (%edi)
+; X32-NEXT:    movzbl %cl, %ecx
+; X32-NEXT:    xorl %edi, %edi
+; X32-NEXT:    subl %ecx, %edi
+; X32-NEXT:    cltd
+; X32-NEXT:    idivl %edi
 ; X32-NEXT:    movb %dl, (%esi)
 ; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 4
+; X32-NEXT:    popl %edi
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: PR37431:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    movq %rdx, %rcx
-; X64-NEXT:    movslq (%rdi), %rax
-; X64-NEXT:    cmpq %rax, %rax
-; X64-NEXT:    sbbb %dl, %dl
-; X64-NEXT:    cmpq %rax, %rax
-; X64-NEXT:    movb %dl, (%rsi)
-; X64-NEXT:    sbbl %esi, %esi
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    xorl %edx, %edx
-; X64-NEXT:    idivl %esi
-; X64-NEXT:    movb %dl, (%rcx)
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:    movq %rdx, %r8
+; X64-NEXT:    movslq (%rdi), %rdx
+; X64-NEXT:    cmpq %rdx, %rax
+; X64-NEXT:    sbbb %cl, %cl
+; X64-NEXT:    cmpq %rdx, %rax
+; X64-NEXT:    movb %cl, (%rsi)
+; X64-NEXT:    sbbl %ecx, %ecx
+; X64-NEXT:    cltd
+; X64-NEXT:    idivl %ecx
+; X64-NEXT:    movb %dl, (%r8)
 ; X64-NEXT:    retq
 entry:
   %tmp = load i32, i32* %arg1
@@ -358,7 +357,7 @@ entry:
   %tmp4 = sub i8 0, %tmp3
   store i8 %tmp4, i8* %arg2
   %tmp5 = sext i8 %tmp4 to i32
-  %tmp6 = srem i32 0, %tmp5
+  %tmp6 = srem i32 %x, %tmp5
   %tmp7 = trunc i32 %tmp6 to i8
   store i8 %tmp7, i8* %arg3
   ret void
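
For reference, a minimal sketch of the pattern the test comment describes
(illustrative only, not part of this commit): negating the zero-extension
of an unsigned-less-than compare is what selects the post-RA pseudo
referenced above (the SETB_C* family, at the time) and materializes as a
"sbbb %reg, %reg" in the checked output.

  define i8 @setb_to_sbb(i32 %a, i32 %b) {
    %cmp = icmp ult i32 %a, %b  ; lowers to a compare that sets CF
    %z = zext i1 %cmp to i8     ; setb
    %neg = sub i8 0, %z         ; 0 - CF, i.e. sbbb %reg, %reg
    ret i8 %neg
  }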



