[llvm] X86: Support FCANONICALIZE on f64 for i686 with SSE2 or AVX (PR #123917)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 22 01:53:38 PST 2025


llvmbot wrote:



@llvm/pr-subscribers-backend-x86

Author: YunQiang Su (wzssyqa)

<details>
<summary>Changes</summary>



---
Full diff: https://github.com/llvm/llvm-project/pull/123917.diff


2 Files Affected:

- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+1-1) 
- (added) llvm/test/CodeGen/X86/canonicalize-vars-f64-i686.ll (+143) 


``````````diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a956074e50d86f..fd1c36d2ba66bc 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -334,10 +334,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
     }
     setOperationAction(ISD::FCANONICALIZE, MVT::f32, Custom);
+    setOperationAction(ISD::FCANONICALIZE, MVT::f64, Custom);
     if (Subtarget.is64Bit()) {
       setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
       setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
-      setOperationAction(ISD::FCANONICALIZE, MVT::f64, Custom);
     }
   }
   if (Subtarget.hasAVX10_2()) {
diff --git a/llvm/test/CodeGen/X86/canonicalize-vars-f64-i686.ll b/llvm/test/CodeGen/X86/canonicalize-vars-f64-i686.ll
new file mode 100644
index 00000000000000..6b66042bdb3146
--- /dev/null
+++ b/llvm/test/CodeGen/X86/canonicalize-vars-f64-i686.ll
@@ -0,0 +1,143 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --default-march x86_64-unknown-linux-gnu --version 5
+; RUN: llc -mattr=+sse2 -mtriple=i686-- < %s | FileCheck %s -check-prefixes=SSE2
+; RUN: llc -mattr=+avx -mtriple=i686-- < %s | FileCheck %s -check-prefixes=AVX
+
+define double @canonicalize_fp64(double %a, double %b) unnamed_addr #0 {
+; SSE2-LABEL: canonicalize_fp64:
+; SSE2:       # %bb.0: # %start
+; SSE2-NEXT:    pushl %ebp
+; SSE2-NEXT:    .cfi_def_cfa_offset 8
+; SSE2-NEXT:    .cfi_offset %ebp, -8
+; SSE2-NEXT:    movl %esp, %ebp
+; SSE2-NEXT:    .cfi_def_cfa_register %ebp
+; SSE2-NEXT:    andl $-8, %esp
+; SSE2-NEXT:    subl $8, %esp
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT:    movapd %xmm0, %xmm2
+; SSE2-NEXT:    cmpunordsd %xmm0, %xmm2
+; SSE2-NEXT:    movapd %xmm2, %xmm3
+; SSE2-NEXT:    andpd %xmm1, %xmm3
+; SSE2-NEXT:    maxsd %xmm0, %xmm1
+; SSE2-NEXT:    andnpd %xmm1, %xmm2
+; SSE2-NEXT:    orpd %xmm3, %xmm2
+; SSE2-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; SSE2-NEXT:    movsd %xmm2, (%esp)
+; SSE2-NEXT:    fldl (%esp)
+; SSE2-NEXT:    movl %ebp, %esp
+; SSE2-NEXT:    popl %ebp
+; SSE2-NEXT:    .cfi_def_cfa %esp, 4
+; SSE2-NEXT:    retl
+;
+; AVX-LABEL: canonicalize_fp64:
+; AVX:       # %bb.0: # %start
+; AVX-NEXT:    pushl %ebp
+; AVX-NEXT:    .cfi_def_cfa_offset 8
+; AVX-NEXT:    .cfi_offset %ebp, -8
+; AVX-NEXT:    movl %esp, %ebp
+; AVX-NEXT:    .cfi_def_cfa_register %ebp
+; AVX-NEXT:    andl $-8, %esp
+; AVX-NEXT:    subl $8, %esp
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT:    vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; AVX-NEXT:    vmovsd %xmm0, (%esp)
+; AVX-NEXT:    fldl (%esp)
+; AVX-NEXT:    movl %ebp, %esp
+; AVX-NEXT:    popl %ebp
+; AVX-NEXT:    .cfi_def_cfa %esp, 4
+; AVX-NEXT:    retl
+start:
+  %c = fcmp olt double %a, %b
+  %d = fcmp uno double %a, 0.000000e+00
+  %or.cond.i.i = or i1 %d, %c
+  %e = select i1 %or.cond.i.i, double %b, double %a
+  %f = tail call double @llvm.canonicalize.f64(double %e) #2
+  ret double %f
+}
+
+define void @v_test_canonicalize_var_f64(double addrspace(1)* %out) #1 {
+; SSE2-LABEL: v_test_canonicalize_var_f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE2-NEXT:    movsd %xmm0, (%eax)
+; SSE2-NEXT:    retl
+;
+; AVX-LABEL: v_test_canonicalize_var_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; AVX-NEXT:    vmovsd %xmm0, (%eax)
+; AVX-NEXT:    retl
+  %val = load double, double addrspace(1)* %out
+  %canonicalized = call double @llvm.canonicalize.f64(double %val)
+  store double %canonicalized, double addrspace(1)* %out
+  ret void
+}
+
+define void @canonicalize_undef(double addrspace(1)* %out) {
+; SSE2-LABEL: canonicalize_undef:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; SSE2-NEXT:    movl $2146959360, 4(%eax) # imm = 0x7FF80000
+; SSE2-NEXT:    movl $0, (%eax)
+; SSE2-NEXT:    retl
+;
+; AVX-LABEL: canonicalize_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; AVX-NEXT:    movl $2146959360, 4(%eax) # imm = 0x7FF80000
+; AVX-NEXT:    movl $0, (%eax)
+; AVX-NEXT:    retl
+  %canonicalized = call double @llvm.canonicalize.f64(double undef)
+  store double %canonicalized, double addrspace(1)* %out
+  ret void
+}
+
+define <4 x double> @canon_fp64_varargsv4f64(<4 x double> %a) {
+; SSE2-LABEL: canon_fp64_varargsv4f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movapd {{.*#+}} xmm2 = [1.0E+0,1.0E+0]
+; SSE2-NEXT:    mulpd %xmm2, %xmm0
+; SSE2-NEXT:    mulpd %xmm2, %xmm1
+; SSE2-NEXT:    retl
+;
+; AVX-LABEL: canon_fp64_varargsv4f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmulpd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; AVX-NEXT:    retl
+  %canonicalized = call <4 x double> @llvm.canonicalize.v4f64(<4 x double> %a)
+  ret <4 x double> %canonicalized
+}
+
+define void @vec_canonicalize_var_v4f64(<4 x double> addrspace(1)* %out) #1 {
+; SSE2-LABEL: vec_canonicalize_var_v4f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; SSE2-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,1.0E+0]
+; SSE2-NEXT:    movapd 16(%eax), %xmm1
+; SSE2-NEXT:    mulpd %xmm0, %xmm1
+; SSE2-NEXT:    mulpd (%eax), %xmm0
+; SSE2-NEXT:    movapd %xmm0, (%eax)
+; SSE2-NEXT:    movapd %xmm1, 16(%eax)
+; SSE2-NEXT:    retl
+;
+; AVX-LABEL: vec_canonicalize_var_v4f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; AVX-NEXT:    vmovapd (%eax), %ymm0
+; AVX-NEXT:    vmulpd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; AVX-NEXT:    vmovapd %ymm0, (%eax)
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retl
+  %val = load <4 x double>, <4 x double> addrspace(1)* %out
+  %canonicalized = call <4 x double> @llvm.canonicalize.v4f64(<4 x double> %val)
+  store <4 x double> %canonicalized, <4 x double> addrspace(1)* %out
+  ret void
+}

``````````
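
For reference, a minimal standalone sketch (not part of the patch) of the pattern the new test exercises; per the CHECK lines above, the custom lowering turns `llvm.canonicalize.f64` into a multiply by 1.0 taken from the constant pool:

```llvm
; Hypothetical reproducer, assuming this patch is applied:
;   llc -mtriple=i686-- -mattr=+sse2 < canon.ll
declare double @llvm.canonicalize.f64(double)

define double @canon(double %x) {
  ; Expected to lower to roughly: mulsd .LCPI0_0, %xmm0 (i.e. %x * 1.0),
  ; followed by a store/fldl pair to return the f64 in st(0) on i686.
  %c = call double @llvm.canonicalize.f64(double %x)
  ret double %c
}
```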

</details>


https://github.com/llvm/llvm-project/pull/123917

