[llvm] f8191ee - [X86] Add additional div-mod-pair negative test coverage

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Jul 24 07:23:22 PDT 2021


Author: Simon Pilgrim
Date: 2021-07-24T15:21:46+01:00
New Revision: f8191ee32b63cfd6499104df7b3bdc2bc3687eec

URL: https://github.com/llvm/llvm-project/commit/f8191ee32b63cfd6499104df7b3bdc2bc3687eec
DIFF: https://github.com/llvm/llvm-project/commit/f8191ee32b63cfd6499104df7b3bdc2bc3687eec.diff

LOG: [X86] Add additional div-mod-pair negative test coverage

As suggested on D106745
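
For reference, the recomposition these test files exercise rebuilds a remainder from an
existing quotient: X - (X / Y) * Y == X % Y, so a single hardware divide can feed both
results. The new negative tests deliberately break that pattern (multiplying by something
other than the divisor, or subtracting from the wrong dividend) so the fold must not fire.
A minimal sketch of the positive pattern is below; the names are illustrative and not
taken from the test files:

    define i32 @recompose_remainder(i32 %x, i32 %y, i32* %divdst) nounwind {
      %div = sdiv i32 %x, %y                 ; quotient is needed anyway
      store i32 %div, i32* %divdst, align 4
      %t1 = mul i32 %div, %y                 ; same divisor %y ...
      %t2 = sub i32 %x, %t1                  ; ... and same dividend %x,
      ret i32 %t2                            ; so %t2 is exactly srem i32 %x, %y
    }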

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
    llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
index d8fe4181287f..1a8962f91b88 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
@@ -1054,3 +1054,70 @@ define i32 @negative_different_x(i32 %x0, i32 %x1, i32 %y, i32* %divdst) nounwin
   %t2 = sub i32 %x1, %t1 ; not %x0
   ret i32 %t2
 }
+
+define i32 @negative_different_y(i32 %x0, i32 %x1, i32 %y, i32 %z, i32* %divdst) nounwind {
+; X86-LABEL: negative_different_y:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    cltd
+; X86-NEXT:    idivl {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %eax, (%esi)
+; X86-NEXT:    imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    subl %eax, %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: negative_different_y:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %edi
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    cltd
+; X64-NEXT:    idivl %ecx
+; X64-NEXT:    movl %eax, (%r8)
+; X64-NEXT:    imull %eax, %edi
+; X64-NEXT:    subl %edi, %esi
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    retq
+  %div = sdiv i32 %x1, %z ; not %x0
+  store i32 %div, i32* %divdst, align 4
+  %t1 = mul i32 %div, %y
+  %t2 = sub i32 %x1, %t1
+  ret i32 %t2
+}
+
+define i32 @negative_inverted_division(i32 %x0, i32 %x1, i32 %y, i32* %divdst) nounwind {
+; X86-LABEL: negative_inverted_division:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    cltd
+; X86-NEXT:    idivl %ecx
+; X86-NEXT:    movl %eax, (%esi)
+; X86-NEXT:    imull %ecx, %eax
+; X86-NEXT:    subl %eax, %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: negative_inverted_division:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    cltd
+; X64-NEXT:    idivl %esi
+; X64-NEXT:    movl %eax, (%rcx)
+; X64-NEXT:    imull %esi, %eax
+; X64-NEXT:    subl %eax, %esi
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    retq
+  %div = sdiv i32 %x0, %x1 ; inverted division
+  store i32 %div, i32* %divdst, align 4
+  %t1 = mul i32 %div, %x1
+  %t2 = sub i32 %x1, %t1
+  ret i32 %t2
+}

diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
index 4c134cb8a362..42d7965a1516 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
@@ -1054,3 +1054,70 @@ define i32 @negative_different_x(i32 %x0, i32 %x1, i32 %y, i32* %divdst) nounwin
   %t2 = sub i32 %x1, %t1 ; not %x0
   ret i32 %t2
 }
+
+define i32 @negative_different_y(i32 %x0, i32 %x1, i32 %y, i32 %z, i32* %divdst) nounwind {
+; X86-LABEL: negative_different_y:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    divl {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %eax, (%esi)
+; X86-NEXT:    imull {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    subl %eax, %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: negative_different_y:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edx, %edi
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    divl %ecx
+; X64-NEXT:    movl %eax, (%r8)
+; X64-NEXT:    imull %eax, %edi
+; X64-NEXT:    subl %edi, %esi
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    retq
+  %div = udiv i32 %x1, %z ; not %x0
+  store i32 %div, i32* %divdst, align 4
+  %t1 = mul i32 %div, %y
+  %t2 = sub i32 %x1, %t1
+  ret i32 %t2
+}
+
+define i32 @negative_inverted_division(i32 %x0, i32 %x1, i32 %y, i32* %divdst) nounwind {
+; X86-LABEL: negative_inverted_division:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    divl %ecx
+; X86-NEXT:    movl %eax, (%esi)
+; X86-NEXT:    imull %ecx, %eax
+; X86-NEXT:    subl %eax, %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: negative_inverted_division:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    divl %esi
+; X64-NEXT:    movl %eax, (%rcx)
+; X64-NEXT:    imull %esi, %eax
+; X64-NEXT:    subl %eax, %esi
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    retq
+  %div = udiv i32 %x0, %x1 ; inverted division
+  store i32 %div, i32* %divdst, align 4
+  %t1 = mul i32 %div, %x1
+  %t2 = sub i32 %x1, %t1
+  ret i32 %t2
+}
