[llvm] db711f7 - [X86] Prefer fpext(splat(X)) to splat(fpext(X)).

Freddy Ye via llvm-commits <llvm-commits@lists.llvm.org>
Mon Jan 16 06:48:44 PST 2023


Author: Freddy Ye
Date: 2023-01-16T22:48:30+08:00
New Revision: db711f79ef57c96782f3e5093ee9b79509a317a4

URL: https://github.com/llvm/llvm-project/commit/db711f79ef57c96782f3e5093ee9b79509a317a4
DIFF: https://github.com/llvm/llvm-project/commit/db711f79ef57c96782f3e5093ee9b79509a317a4.diff

LOG: [X86] Prefer fpext(splat(X)) to splat(fpext(X)).

This patch fixes a regression introduced by D122875. X86 has fpext
instructions supporting the rmb (register-memory-broadcast) form, which
makes fpext(splat(X)) preferable to splat(fpext(X)): the broadcast load
can be folded directly into the conversion.
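
For context, preferScalarizeSplat is the target hook consulted by the
generic DAGCombine from D122875 that rewrites ext(splat(X)) into
splat(ext(X)); returning false for an opcode keeps the original
ext(splat(X)) shape. A minimal sketch of the override this patch adds
(name, signature, and body are taken from the diff below; that the
TargetLowering default returns true, i.e. scalarize, is an assumption
not shown in this commit):

    // Keep fpext(splat(X)) intact so the extend can use the embedded
    // broadcast (rmb) memory form, e.g. vcvtps2pd (%rdi){1to4}, %ymm0,
    // instead of a scalar cvtss2sd followed by a separate broadcast.
    // All other opcodes are assumed to keep the default behavior of
    // scalarizing the splat.
    bool X86TargetLowering::preferScalarizeSplat(unsigned Opc) const {
      return Opc != ISD::FP_EXTEND;
    }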

Reviewed By: RKSimon, skan

Differential Revision: https://reviews.llvm.org/D141657

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/lib/Target/X86/X86ISelLowering.h
    llvm/test/CodeGen/X86/prefer-fpext-splat.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e7815a8c67ca0..5815f334a6842 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -6012,6 +6012,10 @@ bool X86TargetLowering::
   return NewShiftOpcode == ISD::SHL;
 }
 
+bool X86TargetLowering::preferScalarizeSplat(unsigned Opc) const {
+  return Opc != ISD::FP_EXTEND;
+}
+
 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
     const SDNode *N, CombineLevel Level) const {
   assert(((N->getOpcode() == ISD::SHL &&

diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 68a6f19ce05d9..b727725613864 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1091,6 +1091,8 @@ namespace llvm {
         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
         SelectionDAG &DAG) const override;
 
+    bool preferScalarizeSplat(unsigned Opc) const override;
+
     bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                            CombineLevel Level) const override;
 

diff --git a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
index 4f0c2bb65d1d6..1d8b8b3f9a96e 100644
--- a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
+++ b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
@@ -2,23 +2,27 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown                              | FileCheck %s --check-prefixes=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx                  | FileCheck %s --check-prefixes=AVX,AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2                 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl    | FileCheck %s --check-prefixes=AVX,AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512FP16
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl    | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512FP16
 
 define <2 x double> @prefer_f32_v2f64(ptr %p) nounwind {
 ; SSE-LABEL: prefer_f32_v2f64:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
-; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT:    cvtps2pd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: prefer_f32_v2f64:
 ; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT:    vbroadcastss (%rdi), %xmm0
+; AVX-NEXT:    vcvtps2pd %xmm0, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: prefer_f32_v2f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvtps2pd (%rdi){1to2}, %xmm0
+; AVX512-NEXT:    retq
 entry:
   %0 = load float, ptr %p, align 4
   %vecinit.i = insertelement <2 x float> undef, float %0, i64 0
@@ -31,39 +35,21 @@ define <4 x double> @prefer_f32_v4f64(ptr %p) nounwind {
 ; SSE-LABEL: prefer_f32_v4f64:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
-; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT:    cvtps2pd %xmm0, %xmm0
 ; SSE-NEXT:    movaps %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: prefer_f32_v4f64:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: prefer_f32_v4f64:
-; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: prefer_f32_v4f64:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vbroadcastss (%rdi), %xmm0
+; AVX-NEXT:    vcvtps2pd %xmm0, %ymm0
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: prefer_f32_v4f64:
 ; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX512-NEXT:    vcvtps2pd (%rdi){1to4}, %ymm0
 ; AVX512-NEXT:    retq
-;
-; AVX512FP16-LABEL: prefer_f32_v4f64:
-; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512FP16-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vbroadcastsd %xmm0, %ymm0
-; AVX512FP16-NEXT:    retq
 entry:
   %0 = load float, ptr %p, align 4
   %vecinit.i = insertelement <4 x float> undef, float %0, i64 0
@@ -100,19 +86,15 @@ define <4 x float> @prefer_f16_v4f32(ptr %p) nounwind {
 ; AVX2-NEXT:    popq %rax
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: prefer_f16_v4f32:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: prefer_f16_v4f32:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT:    retq
 ;
 ; AVX512FP16-LABEL: prefer_f16_v4f32:
 ; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovsh (%rdi), %xmm0
-; AVX512FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vbroadcastss %xmm0, %xmm0
+; AVX512FP16-NEXT:    vcvtph2psx (%rdi){1to4}, %xmm0
 ; AVX512FP16-NEXT:    retq
 entry:
   %0 = load half, ptr %p, align 4
@@ -152,19 +134,15 @@ define <8 x float> @prefer_f16_v8f32(ptr %p) nounwind {
 ; AVX2-NEXT:    popq %rax
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: prefer_f16_v8f32:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    vbroadcastss %xmm0, %ymm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: prefer_f16_v8f32:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %ymm0
+; AVX512F-NEXT:    retq
 ;
 ; AVX512FP16-LABEL: prefer_f16_v8f32:
 ; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovsh (%rdi), %xmm0
-; AVX512FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vbroadcastss %xmm0, %ymm0
+; AVX512FP16-NEXT:    vcvtph2psx (%rdi){1to8}, %ymm0
 ; AVX512FP16-NEXT:    retq
 entry:
   %0 = load half, ptr %p, align 4
@@ -185,40 +163,28 @@ define <2 x double> @prefer_f16_v2f64(ptr %p) nounwind {
 ; SSE-NEXT:    popq %rax
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: prefer_f16_v2f64:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    pushq %rax
-; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; AVX1-NEXT:    popq %rax
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: prefer_f16_v2f64:
-; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    pushq %rax
-; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; AVX2-NEXT:    popq %rax
-; AVX2-NEXT:    retq
+; AVX-LABEL: prefer_f16_v2f64:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq %rax
+; AVX-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT:    popq %rax
+; AVX-NEXT:    retq
 ;
-; AVX512-LABEL: prefer_f16_v2f64:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: prefer_f16_v2f64:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT:    vcvtps2pd %xmm0, %xmm0
+; AVX512F-NEXT:    retq
 ;
 ; AVX512FP16-LABEL: prefer_f16_v2f64:
 ; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovsh (%rdi), %xmm0
-; AVX512FP16-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX512FP16-NEXT:    vcvtph2pd (%rdi){1to2}, %xmm0
 ; AVX512FP16-NEXT:    retq
 entry:
   %0 = load half, ptr %p, align 4
@@ -261,20 +227,16 @@ define <4 x double> @prefer_f16_v4f64(ptr %p) nounwind {
 ; AVX2-NEXT:    popq %rax
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: prefer_f16_v4f64:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512-NEXT:    vbroadcastsd %xmm0, %ymm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: prefer_f16_v4f64:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT:    vcvtps2pd %xmm0, %ymm0
+; AVX512F-NEXT:    retq
 ;
 ; AVX512FP16-LABEL: prefer_f16_v4f64:
 ; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovsh (%rdi), %xmm0
-; AVX512FP16-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX512FP16-NEXT:    vcvtph2pd (%rdi){1to4}, %ymm0
 ; AVX512FP16-NEXT:    retq
 entry:
   %0 = load half, ptr %p, align 4

