[llvm] d02848b - [X86] or-with-overflow.ll - adjust or_i64_ri constant to not constant fold the icmp

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat May 6 14:43:03 PDT 2023


Author: Simon Pilgrim
Date: 2023-05-06T22:42:14+01:00
New Revision: d02848b2ab3a28e03e2b7bc0d47a8fbff4ed579a

URL: https://github.com/llvm/llvm-project/commit/d02848b2ab3a28e03e2b7bc0d47a8fbff4ed579a
DIFF: https://github.com/llvm/llvm-project/commit/d02848b2ab3a28e03e2b7bc0d47a8fbff4ed579a.diff

LOG: [X86] or-with-overflow.ll - adjust or_i64_ri constant to not constant fold the icmp

Better KnownBits handling of the icmp and/or an upcoming USUBSAT fold would constant fold this test away and prevent us from testing for a cleared overflow flag.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/or-with-overflow.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/or-with-overflow.ll b/llvm/test/CodeGen/X86/or-with-overflow.ll
index faee83a988c5..4440485af54b 100644
--- a/llvm/test/CodeGen/X86/or-with-overflow.ll
+++ b/llvm/test/CodeGen/X86/or-with-overflow.ll
@@ -163,29 +163,26 @@ define i64 @or_i64_ri(i64 %0, i64 %1) nounwind {
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    orl $-17, %ecx
+; X86-NEXT:    orl $17, %ecx
 ; X86-NEXT:    cmpl $1, %ecx
-; X86-NEXT:    movl $-1, %edx
-; X86-NEXT:    movl $-1, %esi
+; X86-NEXT:    movl %edx, %esi
 ; X86-NEXT:    sbbl $0, %esi
-; X86-NEXT:    jl .LBB6_1
-; X86-NEXT:  # %bb.2:
+; X86-NEXT:    jl .LBB6_2
+; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB6_1:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:  .LBB6_2:
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: or_i64_ri:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %rax
-; X64-NEXT:    orq $-17, %rax
+; X64-NEXT:    orq $17, %rax
 ; X64-NEXT:    cmovleq %rdi, %rax
 ; X64-NEXT:    retq
-  %3 = or i64 %0, -17
+  %3 = or i64 %0, 17
   %4 = icmp slt i64 %3, 1
   %5 = select i1 %4, i64 %0, i64 %3
   ret i64 %5


        


More information about the llvm-commits mailing list