[llvm] b693e1c - [X86][GlobalISel] Enable G_LROUND/G_LLROUND with libcall mapping (#125096)

via llvm-commits llvm-commits@lists.llvm.org
Sun Feb 2 23:12:47 PST 2025


Author: JaydeepChauhan14
Date: 2025-02-03T14:12:43+07:00
New Revision: b693e1cf837a5dca6538c26debd7b25ef8f52db6

URL: https://github.com/llvm/llvm-project/commit/b693e1cf837a5dca6538c26debd7b25ef8f52db6
DIFF: https://github.com/llvm/llvm-project/commit/b693e1cf837a5dca6538c26debd7b25ef8f52db6.diff

LOG: [X86][GlobalISel] Enable G_LROUND/G_LLROUND with libcall mapping (#125096)

Added: 
    

Modified: 
    llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
    llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
    llvm/test/CodeGen/X86/llround-conv.ll
    llvm/test/CodeGen/X86/lround-conv-i32.ll
    llvm/test/CodeGen/X86/lround-conv-i64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index f0b241fa5a6186..d4cb224c35d74d 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -411,6 +411,10 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
   } while (0)
 
   switch (Opcode) {
+  case TargetOpcode::G_LROUND:
+    RTLIBCASE(LROUND_F);
+  case TargetOpcode::G_LLROUND:
+    RTLIBCASE(LLROUND_F);
   case TargetOpcode::G_MUL:
     RTLIBCASE_INT(MUL_I);
   case TargetOpcode::G_SDIV:
@@ -1267,6 +1271,8 @@ LegalizerHelper::libcall(MachineInstr &MI, LostDebugLocObserver &LocObserver) {
       return Status;
     break;
   }
+  case TargetOpcode::G_LROUND:
+  case TargetOpcode::G_LLROUND:
   case TargetOpcode::G_INTRINSIC_LRINT:
   case TargetOpcode::G_INTRINSIC_LLRINT: {
     LLT LLTy = MRI.getType(MI.getOperand(1).getReg());

diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
index bab7fe9d25e441..88b5ec8cd004fa 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
@@ -99,6 +99,10 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
       .widenScalarToNextPow2(0, /*Min=*/8)
       .clampScalar(0, s8, sMaxScalar);
 
+  getActionDefinitionsBuilder(G_LROUND).libcall();
+
+  getActionDefinitionsBuilder(G_LLROUND).libcall();
+
   // merge/unmerge
   for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
     unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;

diff --git a/llvm/test/CodeGen/X86/llround-conv.ll b/llvm/test/CodeGen/X86/llround-conv.ll
index 1ccda2bc27bbbd..19a980b72809ea 100644
--- a/llvm/test/CodeGen/X86/llround-conv.ll
+++ b/llvm/test/CodeGen/X86/llround-conv.ll
@@ -1,7 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown             | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
 ; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
 
 define i64 @testmsxs(float %x) {
 ; X86-LABEL: testmsxs:
@@ -26,9 +28,29 @@ define i64 @testmsxs(float %x) {
 ; SSE2-NEXT:    .cfi_def_cfa_offset 4
 ; SSE2-NEXT:    retl
 ;
+; GISEL-X86-LABEL: testmsxs:
+; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT:    movl %eax, (%esp)
+; GISEL-X86-NEXT:    calll llroundf
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 4
+; GISEL-X86-NEXT:    retl
+;
 ; X64-LABEL: testmsxs:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    jmp llroundf@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: testmsxs:
+; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-NEXT:    pushq %rax
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X64-NEXT:    callq llroundf
+; GISEL-X64-NEXT:    popq %rcx
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 8
+; GISEL-X64-NEXT:    retq
 entry:
   %0 = tail call i64 @llvm.llround.f32(float %x)
   ret i64 %0
@@ -57,9 +79,34 @@ define i64 @testmsxd(double %x) {
 ; SSE2-NEXT:    .cfi_def_cfa_offset 4
 ; SSE2-NEXT:    retl
 ;
+; GISEL-X86-LABEL: testmsxd:
+; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT:    movl 4(%eax), %eax
+; GISEL-X86-NEXT:    xorl %edx, %edx
+; GISEL-X86-NEXT:    addl %esp, %edx
+; GISEL-X86-NEXT:    movl %ecx, (%esp)
+; GISEL-X86-NEXT:    movl %eax, 4(%edx)
+; GISEL-X86-NEXT:    calll llround
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 4
+; GISEL-X86-NEXT:    retl
+;
 ; X64-LABEL: testmsxd:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    jmp llround@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: testmsxd:
+; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-NEXT:    pushq %rax
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X64-NEXT:    callq llround
+; GISEL-X64-NEXT:    popq %rcx
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 8
+; GISEL-X64-NEXT:    retq
 entry:
   %0 = tail call i64 @llvm.llround.f64(double %x)
   ret i64 %0
@@ -88,14 +135,165 @@ define i64 @testmsll(x86_fp80 %x) {
 ; SSE2-NEXT:    .cfi_def_cfa_offset 4
 ; SSE2-NEXT:    retl
 ;
+; GISEL-X86-LABEL: testmsll:
+; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT:    fstpt (%esp)
+; GISEL-X86-NEXT:    calll llroundl
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 4
+; GISEL-X86-NEXT:    retl
+;
 ; X64-LABEL: testmsll:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    jmp llroundl@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: testmsll:
+; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-NEXT:    subq $24, %rsp
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 32
+; GISEL-X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; GISEL-X64-NEXT:    fstpt (%rsp)
+; GISEL-X64-NEXT:    callq llroundl
+; GISEL-X64-NEXT:    addq $24, %rsp
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 8
+; GISEL-X64-NEXT:    retq
 entry:
   %0 = tail call i64 @llvm.llround.f80(x86_fp80 %x)
   ret i64 %0
 }
 
-declare i64 @llvm.llround.f32(float) nounwind readnone
-declare i64 @llvm.llround.f64(double) nounwind readnone
-declare i64 @llvm.llround.f80(x86_fp80) nounwind readnone
+define i64 @test_llround_i64_f32(float %x) nounwind {
+; X86-LABEL: test_llround_i64_f32:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    flds {{[0-9]+}}(%esp)
+; X86-NEXT:    fstps (%esp)
+; X86-NEXT:    calll llroundf
+; X86-NEXT:    popl %ecx
+; X86-NEXT:    retl
+;
+; SSE2-LABEL: test_llround_i64_f32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pushl %eax
+; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT:    movss %xmm0, (%esp)
+; SSE2-NEXT:    calll llroundf
+; SSE2-NEXT:    popl %ecx
+; SSE2-NEXT:    retl
+;
+; GISEL-X86-LABEL: test_llround_i64_f32:
+; GISEL-X86:       # %bb.0:
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT:    movl %eax, (%esp)
+; GISEL-X86-NEXT:    calll llroundf
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    retl
+;
+; X64-LABEL: test_llround_i64_f32:
+; X64:       # %bb.0:
+; X64-NEXT:    jmp llroundf@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: test_llround_i64_f32:
+; GISEL-X64:       # %bb.0:
+; GISEL-X64-NEXT:    pushq %rax
+; GISEL-X64-NEXT:    callq llroundf
+; GISEL-X64-NEXT:    popq %rcx
+; GISEL-X64-NEXT:    retq
+  %conv = call i64 @llvm.llround.i64.f32(float %x)
+  ret i64 %conv
+}
+
+define i64 @test_llround_i64_f64(double %x) nounwind {
+; X86-LABEL: test_llround_i64_f64:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    fldl {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpl (%esp)
+; X86-NEXT:    calll llround
+; X86-NEXT:    addl $8, %esp
+; X86-NEXT:    retl
+;
+; SSE2-LABEL: test_llround_i64_f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    subl $8, %esp
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    movsd %xmm0, (%esp)
+; SSE2-NEXT:    calll llround
+; SSE2-NEXT:    addl $8, %esp
+; SSE2-NEXT:    retl
+;
+; GISEL-X86-LABEL: test_llround_i64_f64:
+; GISEL-X86:       # %bb.0:
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT:    movl 4(%eax), %eax
+; GISEL-X86-NEXT:    xorl %edx, %edx
+; GISEL-X86-NEXT:    addl %esp, %edx
+; GISEL-X86-NEXT:    movl %ecx, (%esp)
+; GISEL-X86-NEXT:    movl %eax, 4(%edx)
+; GISEL-X86-NEXT:    calll llround
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    retl
+;
+; X64-LABEL: test_llround_i64_f64:
+; X64:       # %bb.0:
+; X64-NEXT:    jmp llround@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: test_llround_i64_f64:
+; GISEL-X64:       # %bb.0:
+; GISEL-X64-NEXT:    pushq %rax
+; GISEL-X64-NEXT:    callq llround
+; GISEL-X64-NEXT:    popq %rcx
+; GISEL-X64-NEXT:    retq
+  %conv = call i64 @llvm.llround.i64.f64(double %x)
+  ret i64 %conv
+}
+
+define i64 @test_llround_i64_f80(x86_fp80 %x) nounwind {
+; X86-LABEL: test_llround_i64_f80:
+; X86:       # %bb.0:
+; X86-NEXT:    subl $12, %esp
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fstpt (%esp)
+; X86-NEXT:    calll llroundl
+; X86-NEXT:    addl $12, %esp
+; X86-NEXT:    retl
+;
+; SSE2-LABEL: test_llround_i64_f80:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    subl $12, %esp
+; SSE2-NEXT:    fldt {{[0-9]+}}(%esp)
+; SSE2-NEXT:    fstpt (%esp)
+; SSE2-NEXT:    calll llroundl
+; SSE2-NEXT:    addl $12, %esp
+; SSE2-NEXT:    retl
+;
+; GISEL-X86-LABEL: test_llround_i64_f80:
+; GISEL-X86:       # %bb.0:
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT:    fstpt (%esp)
+; GISEL-X86-NEXT:    calll llroundl
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    retl
+;
+; X64-LABEL: test_llround_i64_f80:
+; X64:       # %bb.0:
+; X64-NEXT:    jmp llroundl@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: test_llround_i64_f80:
+; GISEL-X64:       # %bb.0:
+; GISEL-X64-NEXT:    subq $24, %rsp
+; GISEL-X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; GISEL-X64-NEXT:    fstpt (%rsp)
+; GISEL-X64-NEXT:    callq llroundl
+; GISEL-X64-NEXT:    addq $24, %rsp
+; GISEL-X64-NEXT:    retq
+  %conv = call i64 @llvm.llround.i64.f80(x86_fp80 %x)
+  ret i64 %conv
+}

diff --git a/llvm/test/CodeGen/X86/lround-conv-i32.ll b/llvm/test/CodeGen/X86/lround-conv-i32.ll
index 06baf1f273c95f..c37536623143de 100644
--- a/llvm/test/CodeGen/X86/lround-conv-i32.ll
+++ b/llvm/test/CodeGen/X86/lround-conv-i32.ll
@@ -1,47 +1,100 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown             | FileCheck %s
 ; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
 ; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
 
-define i32 @testmsws(float %x) {
+define i32 @testmsws(float %x) nounwind {
 ; CHECK-LABEL: testmsws:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    jmp lroundf # TAILCALL
 ;
+; GISEL-X86-LABEL: testmsws:
+; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT:    movl %eax, (%esp)
+; GISEL-X86-NEXT:    calll lroundf
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    retl
+;
 ; X64-LABEL: testmsws:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    jmp lroundf@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: testmsws:
+; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-NEXT:    pushq %rax
+; GISEL-X64-NEXT:    callq lroundf
+; GISEL-X64-NEXT:    popq %rcx
+; GISEL-X64-NEXT:    retq
 entry:
   %0 = tail call i32 @llvm.lround.i32.f32(float %x)
   ret i32 %0
 }
 
-define i32 @testmswd(double %x) {
+define i32 @testmswd(double %x) nounwind {
 ; CHECK-LABEL: testmswd:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    jmp lround # TAILCALL
 ;
+; GISEL-X86-LABEL: testmswd:
+; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT:    movl 4(%eax), %eax
+; GISEL-X86-NEXT:    xorl %edx, %edx
+; GISEL-X86-NEXT:    addl %esp, %edx
+; GISEL-X86-NEXT:    movl %ecx, (%esp)
+; GISEL-X86-NEXT:    movl %eax, 4(%edx)
+; GISEL-X86-NEXT:    calll lround
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    retl
+;
 ; X64-LABEL: testmswd:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    jmp lround@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: testmswd:
+; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-NEXT:    pushq %rax
+; GISEL-X64-NEXT:    callq lround
+; GISEL-X64-NEXT:    popq %rcx
+; GISEL-X64-NEXT:    retq
 entry:
   %0 = tail call i32 @llvm.lround.i32.f64(double %x)
   ret i32 %0
 }
 
-define i32 @testmsll(x86_fp80 %x) {
+define i32 @testmsll(x86_fp80 %x) nounwind {
 ; CHECK-LABEL: testmsll:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    jmp lroundl # TAILCALL
 ;
+; GISEL-X86-LABEL: testmsll:
+; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT:    fstpt (%esp)
+; GISEL-X86-NEXT:    calll lroundl
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    retl
+;
 ; X64-LABEL: testmsll:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    jmp lroundl@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: testmsll:
+; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-NEXT:    subq $24, %rsp
+; GISEL-X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; GISEL-X64-NEXT:    fstpt (%rsp)
+; GISEL-X64-NEXT:    callq lroundl
+; GISEL-X64-NEXT:    addq $24, %rsp
+; GISEL-X64-NEXT:    retq
 entry:
   %0 = tail call i32 @llvm.lround.i32.f80(x86_fp80 %x)
   ret i32 %0
 }
-
-declare i32 @llvm.lround.i32.f32(float) nounwind readnone
-declare i32 @llvm.lround.i32.f64(double) nounwind readnone
-declare i32 @llvm.lround.i32.f80(x86_fp80) nounwind readnone

diff --git a/llvm/test/CodeGen/X86/lround-conv-i64.ll b/llvm/test/CodeGen/X86/lround-conv-i64.ll
index 1cfa4267365872..36b86f30ca1334 100644
--- a/llvm/test/CodeGen/X86/lround-conv-i64.ll
+++ b/llvm/test/CodeGen/X86/lround-conv-i64.ll
@@ -1,33 +1,98 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86
 ; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64
 
 define i64 @testmsxs(float %x) {
+; GISEL-X86-LABEL: testmsxs:
+; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT:    movl %eax, (%esp)
+; GISEL-X86-NEXT:    calll lroundf
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 4
+; GISEL-X86-NEXT:    retl
+;
 ; CHECK-LABEL: testmsxs:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    jmp lroundf@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: testmsxs:
+; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-NEXT:    pushq %rax
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X64-NEXT:    callq lroundf
+; GISEL-X64-NEXT:    popq %rcx
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 8
+; GISEL-X64-NEXT:    retq
 entry:
   %0 = tail call i64 @llvm.lround.i64.f32(float %x)
   ret i64 %0
 }
 
 define i64 @testmsxd(double %x) {
+; GISEL-X86-LABEL: testmsxd:
+; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; GISEL-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; GISEL-X86-NEXT:    movl 4(%eax), %eax
+; GISEL-X86-NEXT:    xorl %edx, %edx
+; GISEL-X86-NEXT:    addl %esp, %edx
+; GISEL-X86-NEXT:    movl %ecx, (%esp)
+; GISEL-X86-NEXT:    movl %eax, 4(%edx)
+; GISEL-X86-NEXT:    calll lround
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 4
+; GISEL-X86-NEXT:    retl
+;
 ; CHECK-LABEL: testmsxd:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    jmp lround@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: testmsxd:
+; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-NEXT:    pushq %rax
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X64-NEXT:    callq lround
+; GISEL-X64-NEXT:    popq %rcx
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 8
+; GISEL-X64-NEXT:    retq
 entry:
   %0 = tail call i64 @llvm.lround.i64.f64(double %x)
   ret i64 %0
 }
 
 define i64 @testmsll(x86_fp80 %x) {
+; GISEL-X86-LABEL: testmsll:
+; GISEL-X86:       # %bb.0: # %entry
+; GISEL-X86-NEXT:    subl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 16
+; GISEL-X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; GISEL-X86-NEXT:    fstpt (%esp)
+; GISEL-X86-NEXT:    calll lroundl
+; GISEL-X86-NEXT:    addl $12, %esp
+; GISEL-X86-NEXT:    .cfi_def_cfa_offset 4
+; GISEL-X86-NEXT:    retl
+;
 ; CHECK-LABEL: testmsll:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    jmp lroundl@PLT # TAILCALL
+;
+; GISEL-X64-LABEL: testmsll:
+; GISEL-X64:       # %bb.0: # %entry
+; GISEL-X64-NEXT:    subq $24, %rsp
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 32
+; GISEL-X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; GISEL-X64-NEXT:    fstpt (%rsp)
+; GISEL-X64-NEXT:    callq lroundl
+; GISEL-X64-NEXT:    addq $24, %rsp
+; GISEL-X64-NEXT:    .cfi_def_cfa_offset 8
+; GISEL-X64-NEXT:    retq
 entry:
   %0 = tail call i64 @llvm.lround.i64.f80(x86_fp80 %x)
   ret i64 %0
 }
-
-declare i64 @llvm.lround.i64.f32(float) nounwind readnone
-declare i64 @llvm.lround.i64.f64(double) nounwind readnone
-declare i64 @llvm.lround.i64.f80(x86_fp80) nounwind readnone


        


More information about the llvm-commits mailing list