[llvm] 039a88b - [X86] Add tests showing failure to use LEA to avoid spoiling EFLAGS from smulo

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 10 09:22:02 PST 2022


Author: Simon Pilgrim
Date: 2022-02-10T17:21:44Z
New Revision: 039a88be56b443156a626a0fe75c742216394b3a

URL: https://github.com/llvm/llvm-project/commit/039a88be56b443156a626a0fe75c742216394b3a
DIFF: https://github.com/llvm/llvm-project/commit/039a88be56b443156a626a0fe75c742216394b3a.diff

LOG: [X86] Add tests showing failure to use LEA to avoid spoiling EFLAGS from smulo

Add smulo and umulo test coverage

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/select-lea.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/select-lea.ll b/llvm/test/CodeGen/X86/select-lea.ll
index a9dd3f6402728..894b5b4fde14b 100644
--- a/llvm/test/CodeGen/X86/select-lea.ll
+++ b/llvm/test/CodeGen/X86/select-lea.ll
@@ -327,7 +327,91 @@ define i32 @usub_add_load(i32 %x, i32 %y, i32* %pz) nounwind {
   ret i32 %r
 }
 
+define i32 @smul_add_imm(i32 %x, i32 %y) {
+; X64-LABEL: smul_add_imm:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    imull %esi, %eax
+; X64-NEXT:    addl $100, %eax
+; X64-NEXT:    imull %esi, %edi
+; X64-NEXT:    cmovnol %edi, %eax
+; X64-NEXT:    retq
+;
+; CMOV-LABEL: smul_add_imm:
+; CMOV:       # %bb.0:
+; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CMOV-NEXT:    movl %eax, %edx
+; CMOV-NEXT:    imull %ecx, %edx
+; CMOV-NEXT:    addl $100, %edx
+; CMOV-NEXT:    imull %ecx, %eax
+; CMOV-NEXT:    cmovol %edx, %eax
+; CMOV-NEXT:    retl
+;
+; NOCMOV-LABEL: smul_add_imm:
+; NOCMOV:       # %bb.0:
+; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; NOCMOV-NEXT:    movl %eax, %ecx
+; NOCMOV-NEXT:    imull %edx, %ecx
+; NOCMOV-NEXT:    imull %edx, %eax
+; NOCMOV-NEXT:    jno .LBB8_2
+; NOCMOV-NEXT:  # %bb.1:
+; NOCMOV-NEXT:    addl $100, %ecx
+; NOCMOV-NEXT:    movl %ecx, %eax
+; NOCMOV-NEXT:  .LBB8_2:
+; NOCMOV-NEXT:    retl
+  %o = tail call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %x, i32 %y)
+  %v1 = extractvalue { i32, i1 } %o, 1
+  %v2 = extractvalue { i32, i1 } %o, 0
+  %a = add i32 %v2, 100
+  %r = select i1 %v1, i32 %a, i32 %v2
+  ret i32 %r
+}
+
+define i32 @umul_add_imm(i32 %x, i32 %y) {
+; X64-LABEL: umul_add_imm:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    mull %esi
+; X64-NEXT:    # kill: def $eax killed $eax def $rax
+; X64-NEXT:    seto %cl
+; X64-NEXT:    leal 100(%rax), %edx
+; X64-NEXT:    testb %cl, %cl
+; X64-NEXT:    cmovnel %edx, %eax
+; X64-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NEXT:    retq
+;
+; CMOV-LABEL: umul_add_imm:
+; CMOV:       # %bb.0:
+; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CMOV-NEXT:    mull {{[0-9]+}}(%esp)
+; CMOV-NEXT:    seto %cl
+; CMOV-NEXT:    leal 100(%eax), %edx
+; CMOV-NEXT:    testb %cl, %cl
+; CMOV-NEXT:    cmovnel %edx, %eax
+; CMOV-NEXT:    retl
+;
+; NOCMOV-LABEL: umul_add_imm:
+; NOCMOV:       # %bb.0:
+; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; NOCMOV-NEXT:    mull {{[0-9]+}}(%esp)
+; NOCMOV-NEXT:    jno .LBB9_2
+; NOCMOV-NEXT:  # %bb.1:
+; NOCMOV-NEXT:    addl $100, %eax
+; NOCMOV-NEXT:  .LBB9_2:
+; NOCMOV-NEXT:    retl
+  %o = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+  %v1 = extractvalue { i32, i1 } %o, 1
+  %v2 = extractvalue { i32, i1 } %o, 0
+  %a = add i32 %v2, 100
+  %r = select i1 %v1, i32 %a, i32 %v2
+  ret i32 %r
+}
+
 declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
 declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
 declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
 declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
+declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
+declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)


        


More information about the llvm-commits mailing list