[llvm] 2567fea - [X86] Add fabs/fneg rmw style test coverage for #117557

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 4 10:16:46 PST 2024


Author: Simon Pilgrim
Date: 2024-12-04T18:16:30Z
New Revision: 2567feaa13416d1d8c90c1dca6a176a57622c1d6

URL: https://github.com/llvm/llvm-project/commit/2567feaa13416d1d8c90c1dca6a176a57622c1d6
DIFF: https://github.com/llvm/llvm-project/commit/2567feaa13416d1d8c90c1dca6a176a57622c1d6.diff

LOG: [X86] Add fabs/fneg rmw style test coverage for #117557

Missed opportunity to avoid use of the FPU for store(fabs(load())) style patterns
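
For reference, a hand-written sketch of the kind of codegen the new TODOs are aiming for (an illustration of the idea behind #117557, not output produced by this patch): keep the sign-bit manipulation on the integer side so the value never round-trips through an XMM register or the x87 stack. Something along these lines:

    # hypothetical fabs rmw on an f64 in memory (illustration only):
    btrq    $63, (%rdi)            # clear the sign bit in place
    retq

    # hypothetical fneg rmw on an f32 in memory (illustration only):
    xorl    $0x80000000, (%rdi)    # flip the sign bit in place
    retq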

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-fabs.ll
    llvm/test/CodeGen/X86/combine-fneg.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-fabs.ll b/llvm/test/CodeGen/X86/combine-fabs.ll
index a862ea16a748f6..d337c7693ff7d6 100644
--- a/llvm/test/CodeGen/X86/combine-fabs.ll
+++ b/llvm/test/CodeGen/X86/combine-fabs.ll
@@ -135,6 +135,69 @@ define <4 x float> @combine_vec_fabs_fcopysign(<4 x float> %a, <4 x float> %b) {
   ret <4 x float> %2
 }
 
+; TODO: store(fabs(load())) - convert scalar to integer
+define void @combine_fabs_int_rmw_f64(ptr %ptr) {
+; SSE-LABEL: combine_fabs_int_rmw_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    movlps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_fabs_int_rmw_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovlps %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %1 = load double, ptr %ptr
+  %2 = call double @llvm.fabs.f64(double %1)
+  store double %2, ptr %ptr
+  ret void
+}
+
+define void @combine_fabs_int_f32(ptr %src, ptr %dst) {
+; SSE-LABEL: combine_fabs_int_f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    movss %xmm0, (%rsi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_fabs_int_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
+; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vmovss %xmm0, (%rsi)
+; AVX-NEXT:    retq
+  %1 = load float, ptr %src
+  %2 = call float @llvm.fabs.f32(float %1)
+  store float %2, ptr %dst
+  ret void
+}
+
+; don't convert vector to scalar
+define void @combine_fabs_vec_int_v4f32(ptr %src, ptr %dst) {
+; SSE-LABEL: combine_fabs_vec_int_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps (%rdi), %xmm0
+; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    movaps %xmm0, (%rsi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_fabs_vec_int_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vbroadcastss {{.*#+}} xmm0 = [NaN,NaN,NaN,NaN]
+; AVX-NEXT:    vandps (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vmovaps %xmm0, (%rsi)
+; AVX-NEXT:    retq
+  %1 = load <4 x float>, ptr %src
+  %2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %1)
+  store <4 x float> %2, ptr %dst
+  ret void
+}
+
 declare float @llvm.fabs.f32(float %p)
 declare float @llvm.copysign.f32(float %Mag, float %Sgn)
 

diff --git a/llvm/test/CodeGen/X86/combine-fneg.ll b/llvm/test/CodeGen/X86/combine-fneg.ll
index e4a07348dc96c5..e8e3465c99383d 100644
--- a/llvm/test/CodeGen/X86/combine-fneg.ll
+++ b/llvm/test/CodeGen/X86/combine-fneg.ll
@@ -205,4 +205,85 @@ define <4 x float> @fneg(<4 x float> %Q) nounwind {
   ret <4 x float> %tmp
 }
 
+; TODO: store(fneg(load())) - convert scalar to integer
+define void @fneg_int_rmw_f32(ptr %ptr) {
+; X86-SSE-LABEL: fneg_int_rmw_f32:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movss %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_int_rmw_f32:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    movss %xmm0, (%rdi)
+; X64-SSE-NEXT:    retq
+  %1 = load float, ptr %ptr
+  %2 = fneg float %1
+  store float %2, ptr %ptr
+  ret void
+}
+
+define void @fneg_int_f64(ptr %src, ptr %dst) {
+; X86-SSE1-LABEL: fneg_int_f64:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    fldl (%ecx)
+; X86-SSE1-NEXT:    fchs
+; X86-SSE1-NEXT:    fstpl (%eax)
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE2-LABEL: fneg_int_f64:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    movlps %xmm0, (%eax)
+; X86-SSE2-NEXT:    retl
+;
+; X64-SSE1-LABEL: fneg_int_f64:
+; X64-SSE1:       # %bb.0:
+; X64-SSE1-NEXT:    fldl (%rdi)
+; X64-SSE1-NEXT:    fchs
+; X64-SSE1-NEXT:    fstpl (%rsi)
+; X64-SSE1-NEXT:    retq
+;
+; X64-SSE2-LABEL: fneg_int_f64:
+; X64-SSE2:       # %bb.0:
+; X64-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE2-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE2-NEXT:    movlps %xmm0, (%rsi)
+; X64-SSE2-NEXT:    retq
+  %1 = load double, ptr %src
+  %2 = fneg double %1
+  store double %2, ptr %dst
+  ret void
+}
 
+; don't convert vector to scalar
+define void @fneg_int_v4f32(ptr %src, ptr %dst) {
+; X86-SSE-LABEL: fneg_int_v4f32:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movaps (%ecx), %xmm0
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movaps %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_int_v4f32:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps (%rdi), %xmm0
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    movaps %xmm0, (%rsi)
+; X64-SSE-NEXT:    retq
+  %1 = load <4 x float>, ptr %src
+  %2 = fneg <4 x float> %1
+  store <4 x float> %2, ptr %dst
+  ret void
+}

More information about the llvm-commits mailing list