[llvm] 5565b38 - [X86] Add smulo and umulo with add+load test coverage

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 10 09:47:33 PST 2022


Author: Simon Pilgrim
Date: 2022-02-10T17:47:24Z
New Revision: 5565b38a9f211c2c1e9a2433528dbba2d81902ab

URL: https://github.com/llvm/llvm-project/commit/5565b38a9f211c2c1e9a2433528dbba2d81902ab
DIFF: https://github.com/llvm/llvm-project/commit/5565b38a9f211c2c1e9a2433528dbba2d81902ab.diff

LOG: [X86] Add smulo and umulo with add+load test coverage

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/select-lea.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/select-lea.ll b/llvm/test/CodeGen/X86/select-lea.ll
index 894b5b4fde14..487b1f3d3a22 100644
--- a/llvm/test/CodeGen/X86/select-lea.ll
+++ b/llvm/test/CodeGen/X86/select-lea.ll
@@ -369,6 +369,53 @@ define i32 @smul_add_imm(i32 %x, i32 %y) {
   ret i32 %r
 }
 
+define i32 @smul_add_load(i32 %x, i32 %y, i32* %pz) nounwind {
+; X64-LABEL: smul_add_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    imull %esi, %eax
+; X64-NEXT:    addl (%rdx), %eax
+; X64-NEXT:    imull %esi, %edi
+; X64-NEXT:    cmovnol %edi, %eax
+; X64-NEXT:    retq
+;
+; CMOV-LABEL: smul_add_load:
+; CMOV:       # %bb.0:
+; CMOV-NEXT:    pushl %esi
+; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CMOV-NEXT:    movl %eax, %esi
+; CMOV-NEXT:    imull %edx, %esi
+; CMOV-NEXT:    addl (%ecx), %esi
+; CMOV-NEXT:    imull %edx, %eax
+; CMOV-NEXT:    cmovol %esi, %eax
+; CMOV-NEXT:    popl %esi
+; CMOV-NEXT:    retl
+;
+; NOCMOV-LABEL: smul_add_load:
+; NOCMOV:       # %bb.0:
+; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; NOCMOV-NEXT:    movl %eax, %ecx
+; NOCMOV-NEXT:    imull %edx, %ecx
+; NOCMOV-NEXT:    imull %edx, %eax
+; NOCMOV-NEXT:    jno .LBB9_2
+; NOCMOV-NEXT:  # %bb.1:
+; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; NOCMOV-NEXT:    addl (%eax), %ecx
+; NOCMOV-NEXT:    movl %ecx, %eax
+; NOCMOV-NEXT:  .LBB9_2:
+; NOCMOV-NEXT:    retl
+  %o = tail call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %x, i32 %y)
+  %v1 = extractvalue { i32, i1 } %o, 1
+  %v2 = extractvalue { i32, i1 } %o, 0
+  %z = load i32, i32* %pz
+  %a = add i32 %v2, %z
+  %r = select i1 %v1, i32 %a, i32 %v2
+  ret i32 %r
+}
+
 define i32 @umul_add_imm(i32 %x, i32 %y) {
 ; X64-LABEL: umul_add_imm:
 ; X64:       # %bb.0:
@@ -396,10 +443,10 @@ define i32 @umul_add_imm(i32 %x, i32 %y) {
 ; NOCMOV:       # %bb.0:
 ; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; NOCMOV-NEXT:    mull {{[0-9]+}}(%esp)
-; NOCMOV-NEXT:    jno .LBB9_2
+; NOCMOV-NEXT:    jno .LBB10_2
 ; NOCMOV-NEXT:  # %bb.1:
 ; NOCMOV-NEXT:    addl $100, %eax
-; NOCMOV-NEXT:  .LBB9_2:
+; NOCMOV-NEXT:  .LBB10_2:
 ; NOCMOV-NEXT:    retl
   %o = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
   %v1 = extractvalue { i32, i1 } %o, 1
@@ -409,6 +456,50 @@ define i32 @umul_add_imm(i32 %x, i32 %y) {
   ret i32 %r
 }
 
+define i32 @umul_add_load(i32 %x, i32 %y, i32* %pz) nounwind {
+; X64-LABEL: umul_add_load:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdx, %rcx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    mull %esi
+; X64-NEXT:    seto %dl
+; X64-NEXT:    movl (%rcx), %ecx
+; X64-NEXT:    addl %eax, %ecx
+; X64-NEXT:    testb %dl, %dl
+; X64-NEXT:    cmovnel %ecx, %eax
+; X64-NEXT:    retq
+;
+; CMOV-LABEL: umul_add_load:
+; CMOV:       # %bb.0:
+; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CMOV-NEXT:    mull {{[0-9]+}}(%esp)
+; CMOV-NEXT:    seto %dl
+; CMOV-NEXT:    movl (%ecx), %ecx
+; CMOV-NEXT:    addl %eax, %ecx
+; CMOV-NEXT:    testb %dl, %dl
+; CMOV-NEXT:    cmovnel %ecx, %eax
+; CMOV-NEXT:    retl
+;
+; NOCMOV-LABEL: umul_add_load:
+; NOCMOV:       # %bb.0:
+; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; NOCMOV-NEXT:    mull {{[0-9]+}}(%esp)
+; NOCMOV-NEXT:    jno .LBB11_2
+; NOCMOV-NEXT:  # %bb.1:
+; NOCMOV-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; NOCMOV-NEXT:    addl (%ecx), %eax
+; NOCMOV-NEXT:  .LBB11_2:
+; NOCMOV-NEXT:    retl
+  %o = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
+  %v1 = extractvalue { i32, i1 } %o, 1
+  %v2 = extractvalue { i32, i1 } %o, 0
+  %z = load i32, i32* %pz
+  %a = add i32 %v2, %z
+  %r = select i1 %v1, i32 %a, i32 %v2
+  ret i32 %r
+}
+
 declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
 declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
 declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)


        


More information about the llvm-commits mailing list