[llvm-branch-commits] [llvm] 85aaa3e - [X86] Regenerate sdiv_fix_sat.ll + udiv_fix_sat.ll tests

Simon Pilgrim via llvm-branch-commits <llvm-branch-commits@lists.llvm.org>
Tue Jan 12 09:30:29 PST 2021


Author: Simon Pilgrim
Date: 2021-01-12T17:25:30Z
New Revision: 85aaa3e310c23ec8a375b7a2e2fceee5a72441ef

URL: https://github.com/llvm/llvm-project/commit/85aaa3e310c23ec8a375b7a2e2fceee5a72441ef
DIFF: https://github.com/llvm/llvm-project/commit/85aaa3e310c23ec8a375b7a2e2fceee5a72441ef.diff

LOG: [X86] Regenerate sdiv_fix_sat.ll + udiv_fix_sat.ll tests

Adding missing libcall PLT qualifiers
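For context, a minimal sketch of where these qualifiers come from (illustrative only, not taken from this commit): an i128 division on x86-64 has no native instruction and is lowered to a __divti3 libcall, and with PIC codegen the call to that external function is emitted through the PLT, which is what the regenerated CHECK lines now reflect. A hypothetical reduced test, assuming an explicit -relocation-model=pic RUN line:

    ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic | FileCheck %s
    define i128 @div128(i128 %a, i128 %b) nounwind {
    ; CHECK-LABEL: div128:
    ; CHECK: callq __divti3@PLT
      %r = sdiv i128 %a, %b
      ret i128 %r
    }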

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/sdiv_fix_sat.ll
    llvm/test/CodeGen/X86/udiv_fix_sat.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/sdiv_fix_sat.ll b/llvm/test/CodeGen/X86/sdiv_fix_sat.ll
index 512488e8f872..617d5d7876bd 100644
--- a/llvm/test/CodeGen/X86/sdiv_fix_sat.ll
+++ b/llvm/test/CodeGen/X86/sdiv_fix_sat.ll
@@ -322,7 +322,7 @@ define i64 @func5(i64 %x, i64 %y) nounwind {
 ; X64-NEXT:    movq %r15, %rdi
 ; X64-NEXT:    movq %r12, %rsi
 ; X64-NEXT:    movq %r13, %rcx
-; X64-NEXT:    callq __divti3
+; X64-NEXT:    callq __divti3@PLT
 ; X64-NEXT:    movq %rax, %rbx
 ; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT:    movq %rdx, %rbp
@@ -338,7 +338,7 @@ define i64 @func5(i64 %x, i64 %y) nounwind {
 ; X64-NEXT:    movq %r12, %rsi
 ; X64-NEXT:    movq (%rsp), %rdx # 8-byte Reload
 ; X64-NEXT:    movq %r13, %rcx
-; X64-NEXT:    callq __modti3
+; X64-NEXT:    callq __modti3@PLT
 ; X64-NEXT:    orq %rax, %rdx
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    testb %r14b, %al
@@ -613,7 +613,7 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    movq %r12, %rdi
 ; X64-NEXT:    movq %rbp, %rsi
 ; X64-NEXT:    movq %r15, %rcx
-; X64-NEXT:    callq __divti3
+; X64-NEXT:    callq __divti3@PLT
 ; X64-NEXT:    movq %rax, %r13
 ; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT:    movq %rdx, %r14
@@ -626,7 +626,7 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    movq %rbp, %rsi
 ; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
 ; X64-NEXT:    movq %r15, %rcx
-; X64-NEXT:    callq __modti3
+; X64-NEXT:    callq __modti3@PLT
 ; X64-NEXT:    orq %rax, %rdx
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    testb %bl, %al
@@ -668,7 +668,7 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    movq %r15, %rdi
 ; X64-NEXT:    movq %r13, %rsi
 ; X64-NEXT:    movq %rbp, %rcx
-; X64-NEXT:    callq __divti3
+; X64-NEXT:    callq __divti3@PLT
 ; X64-NEXT:    movq %rax, %r12
 ; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT:    movq %rdx, %r14
@@ -681,7 +681,7 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    movq %r13, %rsi
 ; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
 ; X64-NEXT:    movq %rbp, %rcx
-; X64-NEXT:    callq __modti3
+; X64-NEXT:    callq __modti3@PLT
 ; X64-NEXT:    orq %rax, %rdx
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    testb %bl, %al
@@ -735,7 +735,7 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    movq %r15, %rdi
 ; X64-NEXT:    movq %r12, %rsi
 ; X64-NEXT:    movq %rbp, %rcx
-; X64-NEXT:    callq __divti3
+; X64-NEXT:    callq __divti3@PLT
 ; X64-NEXT:    movq %rax, %r13
 ; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT:    movq %rdx, %r14
@@ -748,7 +748,7 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    movq %r12, %rsi
 ; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
 ; X64-NEXT:    movq %rbp, %rcx
-; X64-NEXT:    callq __modti3
+; X64-NEXT:    callq __modti3@PLT
 ; X64-NEXT:    orq %rax, %rdx
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    testb %bl, %al
@@ -790,7 +790,7 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    movq %r15, %rdi
 ; X64-NEXT:    movq %r13, %rsi
 ; X64-NEXT:    movq %rbp, %rcx
-; X64-NEXT:    callq __divti3
+; X64-NEXT:    callq __divti3@PLT
 ; X64-NEXT:    movq %rax, %r12
 ; X64-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; X64-NEXT:    movq %rdx, %r14
@@ -803,7 +803,7 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    movq %r13, %rsi
 ; X64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
 ; X64-NEXT:    movq %rbp, %rcx
-; X64-NEXT:    callq __modti3
+; X64-NEXT:    callq __modti3@PLT
 ; X64-NEXT:    orq %rax, %rdx
 ; X64-NEXT:    setne %al
 ; X64-NEXT:    testb %bl, %al

diff --git a/llvm/test/CodeGen/X86/udiv_fix_sat.ll b/llvm/test/CodeGen/X86/udiv_fix_sat.ll
index d2e3b80c2145..2be51c3ccbba 100644
--- a/llvm/test/CodeGen/X86/udiv_fix_sat.ll
+++ b/llvm/test/CodeGen/X86/udiv_fix_sat.ll
@@ -179,7 +179,7 @@ define i64 @func5(i64 %x, i64 %y) nounwind {
 ; X64-NEXT:    shlq $32, %rdi
 ; X64-NEXT:    xorl %ebx, %ebx
 ; X64-NEXT:    xorl %ecx, %ecx
-; X64-NEXT:    callq __udivti3
+; X64-NEXT:    callq __udivti3@PLT
 ; X64-NEXT:    cmpq $-1, %rax
 ; X64-NEXT:    movq $-1, %rcx
 ; X64-NEXT:    cmovbq %rax, %rcx

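As an aside, the CHECK lines in these files are autogenerated, so a change like this is produced by rerunning the update script (which rewrites every X64-NEXT line from fresh llc output) rather than by hand-editing each call site; a plausible invocation would be:

    llvm/utils/update_llc_test_checks.py \
        llvm/test/CodeGen/X86/sdiv_fix_sat.ll \
        llvm/test/CodeGen/X86/udiv_fix_sat.ll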



More information about the llvm-branch-commits mailing list