[llvm] X86: Support FCANONICALIZE on f64 for i686 with SSE2 or AVX (PR #123917)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 22 03:00:17 PST 2025
================
@@ -0,0 +1,143 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --default-march x86_64-unknown-linux-gnu --version 5
+; RUN: llc -mattr=+sse2 -mtriple=i686-- < %s | FileCheck %s -check-prefixes=SSE2
+; RUN: llc -mattr=+avx -mtriple=i686-- < %s | FileCheck %s -check-prefixes=AVX
+
+define double @canonicalize_fp64(double %a, double %b) unnamed_addr #0 {
+; SSE2-LABEL: canonicalize_fp64:
+; SSE2: # %bb.0: # %start
+; SSE2-NEXT: pushl %ebp
+; SSE2-NEXT: .cfi_def_cfa_offset 8
+; SSE2-NEXT: .cfi_offset %ebp, -8
+; SSE2-NEXT: movl %esp, %ebp
+; SSE2-NEXT: .cfi_def_cfa_register %ebp
+; SSE2-NEXT: andl $-8, %esp
+; SSE2-NEXT: subl $8, %esp
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: movapd %xmm0, %xmm2
+; SSE2-NEXT: cmpunordsd %xmm0, %xmm2
+; SSE2-NEXT: movapd %xmm2, %xmm3
+; SSE2-NEXT: andpd %xmm1, %xmm3
+; SSE2-NEXT: maxsd %xmm0, %xmm1
+; SSE2-NEXT: andnpd %xmm1, %xmm2
+; SSE2-NEXT: orpd %xmm3, %xmm2
+; SSE2-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; SSE2-NEXT: movsd %xmm2, (%esp)
+; SSE2-NEXT: fldl (%esp)
+; SSE2-NEXT: movl %ebp, %esp
+; SSE2-NEXT: popl %ebp
+; SSE2-NEXT: .cfi_def_cfa %esp, 4
+; SSE2-NEXT: retl
+;
+; AVX-LABEL: canonicalize_fp64:
+; AVX: # %bb.0: # %start
+; AVX-NEXT: pushl %ebp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: .cfi_offset %ebp, -8
+; AVX-NEXT: movl %esp, %ebp
+; AVX-NEXT: .cfi_def_cfa_register %ebp
+; AVX-NEXT: andl $-8, %esp
+; AVX-NEXT: subl $8, %esp
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
+; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; AVX-NEXT: vmovsd %xmm0, (%esp)
+; AVX-NEXT: fldl (%esp)
+; AVX-NEXT: movl %ebp, %esp
+; AVX-NEXT: popl %ebp
+; AVX-NEXT: .cfi_def_cfa %esp, 4
+; AVX-NEXT: retl
+start:
+ %c = fcmp olt double %a, %b
+ %d = fcmp uno double %a, 0.000000e+00
+ %or.cond.i.i = or i1 %d, %c
+ %e = select i1 %or.cond.i.i, double %b, double %a
+ %f = tail call double @llvm.canonicalize.f64(double %e) #2
+ ret double %f
+}
+
+define void @v_test_canonicalize_var_f64(double addrspace(1)* %out) #1 {
+; SSE2-LABEL: v_test_canonicalize_var_f64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE2-NEXT: movsd %xmm0, (%eax)
+; SSE2-NEXT: retl
+;
+; AVX-LABEL: v_test_canonicalize_var_f64:
+; AVX: # %bb.0:
+; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; AVX-NEXT: vmovsd %xmm0, (%eax)
+; AVX-NEXT: retl
+ %val = load double, double addrspace(1)* %out
----------------
arsenm wrote:
Use opaque pointers. Also can't you just use a function argument?
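
For illustration, a minimal sketch of how the quoted test could address both points, assuming the intent is to keep the canonicalize-then-store pattern: take the double as a function argument instead of loading it, and write the result through an opaque pointer. The function and value names here are illustrative, not the author's revised test; attribute groups are omitted since they are not shown in the quoted hunk.

define void @v_test_canonicalize_arg_f64(double %val, ptr addrspace(1) %out) {
  ; Canonicalize the incoming argument directly; no load is needed.
  %canonicalized = call double @llvm.canonicalize.f64(double %val)
  ; Store through an opaque pointer rather than a typed double addrspace(1)* pointer.
  store double %canonicalized, ptr addrspace(1) %out
  ret void
}

declare double @llvm.canonicalize.f64(double)
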
https://github.com/llvm/llvm-project/pull/123917