[llvm] [TargetLowering] Lower ldexp into target supported instructions (PR #67552)

via llvm-commits llvm-commits at lists.llvm.org
Sun Oct 8 04:51:51 PDT 2023


https://github.com/huhu233 updated https://github.com/llvm/llvm-project/pull/67552

>From cc934138fa065edacd93322a8dbcc0ac1caeb826 Mon Sep 17 00:00:00 2001
From: huhu233 <1293577861 at qq.com>
Date: Sun, 8 Oct 2023 19:48:23 +0800
Subject: [PATCH 1/2] [AArch64] Lower mathlib call ldexp into fscale when SVE
 is enabled

The SVE 'fscale' instruction is equivalent to the mathlib call ldexp, but has
better performance. This patch lowers ldexp into fscale when SVE is enabled.
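
For context (not part of the patch): ldexp(x, n) scales x by 2^n, which is
exactly what SVE FSCALE computes per element, so the scalar libcall can be
replaced by a single predicated fscale on the lowest lane. A minimal,
purely illustrative C++ sketch of that equivalence:

  #include <cassert>
  #include <cmath>

  // ldexp scales a value by an integer power of two; for representable
  // results the scaling is exact, matching the per-element behaviour of
  // the SVE FSCALE instruction this patch lowers to.
  int main() {
    double x = 1.5;
    for (int n = -10; n <= 10; ++n)
      assert(std::ldexp(x, n) == x * std::pow(2.0, n));
    return 0;
  }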
---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 51 ++++++++++++++
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |  1 +
 llvm/test/CodeGen/AArch64/ldexp.ll            | 66 +++++++++++++++++++
 3 files changed, 118 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/ldexp.ll

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9cda43e58d27a43..95b91434348b91d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1642,6 +1642,12 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
 
   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
 
+  if (Subtarget->hasSVE()) {
+    setOperationAction(ISD::FLDEXP, MVT::f64, Custom);
+    setOperationAction(ISD::FLDEXP, MVT::f32, Custom);
+    setOperationAction(ISD::FLDEXP, MVT::f16, Custom);
+  }
+
   PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
 
   IsStrictFPEnabled = true;
@@ -6217,6 +6223,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::FSHL:
   case ISD::FSHR:
     return LowerFunnelShift(Op, DAG);
+  case ISD::FLDEXP:
+    return LowerFLDEXP(Op, DAG);
   }
 }
 
@@ -26360,3 +26368,46 @@ bool AArch64TargetLowering::preferScalarizeSplat(SDNode *N) const {
   }
   return true;
 }
+
+SDValue AArch64TargetLowering::LowerFLDEXP(SDValue Op,
+                                           SelectionDAG &DAG) const {
+  SDValue X = Op.getOperand(0);
+  EVT XScalarTy = X.getValueType();
+  SDValue Exp = Op.getOperand(1);
+
+  SDLoc DL(Op);
+  EVT XVT, ExpVT;
+  switch (Op.getSimpleValueType().SimpleTy) {
+  default:
+    return SDValue();
+  case MVT::f16:
+    X = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, X);
+  case MVT::f32:
+    XVT = MVT::nxv4f32;
+    ExpVT = MVT::nxv4i32;
+    break;
+  case MVT::f64:
+    XVT = MVT::nxv2f64;
+    ExpVT = MVT::nxv2i64;
+    Exp = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Exp);
+    break;
+  }
+
+  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
+  SDValue VX =
+      DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, XVT, DAG.getUNDEF(XVT), X, Zero);
+  SDValue VExp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ExpVT,
+                             DAG.getUNDEF(ExpVT), Exp, Zero);
+  SDValue VPg = getPTrue(DAG, DL, XVT.changeVectorElementType(MVT::i1),
+                         AArch64SVEPredPattern::all);
+  SDValue FScale =
+      DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XVT,
+                  DAG.getConstant(Intrinsic::aarch64_sve_fscale, DL, MVT::i64),
+                  VPg, VX, VExp);
+  SDValue Final =
+      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, X.getValueType(), FScale, Zero);
+  if (X.getValueType() != XScalarTy)
+    Final = DAG.getNode(ISD::FP_ROUND, DL, XScalarTy, Final,
+                        DAG.getIntPtrConstant(1, SDLoc(Op)));
+  return Final;
+}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 9dcfba3a229cccd..c67083a70b0fa02 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1136,6 +1136,7 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerFixedLengthFPToIntToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVECTOR_SHUFFLEToSVE(SDValue Op,
                                               SelectionDAG &DAG) const;
+  SDValue LowerFLDEXP(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                         SmallVectorImpl<SDNode *> &Created) const override;
diff --git a/llvm/test/CodeGen/AArch64/ldexp.ll b/llvm/test/CodeGen/AArch64/ldexp.ll
new file mode 100644
index 000000000000000..07ff574cd478cbc
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ldexp.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=aarch64 -mattr=+sve < %s -o - | FileCheck %s
+
+define double @testExp(double %val, i32 %a) {
+; CHECK-LABEL: testExp:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    sxtw x8, w0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    fscale z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %call = tail call fast double @ldexp(double %val, i32 %a)
+  ret double %call
+}
+
+declare double @ldexp(double, i32) #1
+
+define float @testExpf(float %val, i32 %a) {
+; CHECK-LABEL: testExpf:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    fmov s1, w0
+; CHECK-NEXT:    // kill: def $s0 killed $s0 def $z0
+; CHECK-NEXT:    fscale z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+entry:
+  %call = tail call fast float @ldexpf(float %val, i32 %a)
+  ret float %call
+}
+
+declare float @ldexpf(float, i32) #1
+
+define fp128 @testExpl(fp128 %val, i32 %a) {
+; CHECK-LABEL: testExpl:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b ldexpl
+entry:
+  %call = tail call fast fp128 @ldexpl(fp128 %val, i32 %a)
+  ret fp128 %call
+}
+
+declare fp128 @ldexpl(fp128, i32) #1
+
+define half @testExpf16(half %val, i32 %a) {
+; CHECK-LABEL: testExpf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    fmov s1, w0
+; CHECK-NEXT:    fscale z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fcvt h0, s0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call fast half @llvm.ldexp.f16.i32(half %val, i32 %a)
+  ret half %0
+}
+
+declare half @llvm.ldexp.f16.i32(half, i32) #1
+
+attributes #1 = { mustprogress nofree nosync nounwind willreturn memory(none) }
+

>From 6593af56d51e27f643be756ec1c25eede30af318 Mon Sep 17 00:00:00 2001
From: huhu233 <1293577861 at qq.com>
Date: Sun, 8 Oct 2023 19:50:28 +0800
Subject: [PATCH 2/2] [X86] Lower mathlib call ldexp into scalef when AVX512 is
 enabled
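
The AVX-512 scalar scale instructions (VSCALEFSS/VSCALEFSD) scale by
2^floor(exp) with the exponent supplied as a floating-point value, so the
integer exponent is first converted via SINT_TO_FP. A hypothetical
intrinsics-level sketch of the sequence this lowering aims to produce
(the function name ldexpf_avx512 is illustrative, not part of the patch):

  #include <immintrin.h>

  // Requires AVX-512F. vscalefss computes a * 2^floor(b), so converting the
  // integer exponent to float first reproduces ldexpf's semantics.
  float ldexpf_avx512(float val, int n) {
    __m128 v = _mm_set_ss(val);
    __m128 e = _mm_set_ss(static_cast<float>(n)); // SINT_TO_FP in the DAG
    __m128 r = _mm_scalef_ss(v, e);               // lowers to vscalefss
    return _mm_cvtss_f32(r);
  }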

---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  52 +++++
 llvm/lib/Target/X86/X86ISelLowering.h         |   2 +
 llvm/test/CodeGen/X86/call-ldexp.ll           |  62 ++++++
 .../X86/fold-int-pow2-with-fmul-or-fdiv.ll    | 202 ++++++++----------
 4 files changed, 208 insertions(+), 110 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/call-ldexp.ll

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c4cd2a672fe7b26..7df62e58570758e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2395,6 +2395,12 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
   }
 
+  if (Subtarget.hasAVX512()) {
+    setOperationAction(ISD::FLDEXP, MVT::f16, Custom);
+    setOperationAction(ISD::FLDEXP, MVT::f32, Custom);
+    setOperationAction(ISD::FLDEXP, MVT::f64, Custom);
+  }
+
   // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
   // is. We should promote the value to 64-bits to solve this.
   // This is what the CRT headers do - `fmodf` is an inline header
@@ -31904,6 +31910,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::ADDRSPACECAST:      return LowerADDRSPACECAST(Op, DAG);
   case X86ISD::CVTPS2PH:        return LowerCVTPS2PH(Op, DAG);
   case ISD::PREFETCH:           return LowerPREFETCH(Op, Subtarget, DAG);
+  case ISD::FLDEXP:
+    return LowerFLDEXP(Op, DAG);
   }
 }
 
@@ -57389,3 +57397,47 @@ Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
     return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
   return TargetLowering::getPrefLoopAlignment();
 }
+
+SDValue X86TargetLowering::LowerFLDEXP(SDValue Op, SelectionDAG &DAG) const {
+  SDValue X = Op.getOperand(0);
+  EVT XScalarTy = X.getValueType();
+  SDValue Exp = Op.getOperand(1);
+
+  SDLoc DL(Op);
+  EVT XVT, ExpVT;
+  SDValue IID;
+  switch (Op.getSimpleValueType().SimpleTy) {
+  default:
+    return SDValue();
+  case MVT::f16:
+    X = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, X);
+  case MVT::f32:
+    XVT = MVT::v4f32;
+    ExpVT = MVT::v4f32;
+    IID = DAG.getConstant(Intrinsic::x86_avx512_mask_scalef_ss, DL, MVT::i64);
+    break;
+  case MVT::f64:
+    XVT = MVT::v2f64;
+    ExpVT = MVT::v2f64;
+    IID = DAG.getConstant(Intrinsic::x86_avx512_mask_scalef_sd, DL, MVT::i64);
+    break;
+  }
+
+  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
+  Exp = DAG.getNode(ISD::SINT_TO_FP, DL, X.getValueType(), Exp);
+  SDValue VX =
+      DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, XVT, DAG.getUNDEF(XVT), X, Zero);
+  SDValue VExp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ExpVT,
+                             DAG.getUNDEF(ExpVT), Exp, Zero);
+  SDValue Pg = DAG.getConstant(-1, DL, MVT::i8);
+  // _MM_FROUND_CUR_DIRECTION (4, no rounding).
+  SDValue Round = DAG.getConstant(4, DL, MVT::i32);
+  SDValue Scalef = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XVT,
+                               {IID, VX, VExp, VX, Pg, Round});
+  SDValue Final =
+      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, X.getValueType(), Scalef, Zero);
+  if (X.getValueType() != XScalarTy)
+    Final = DAG.getNode(ISD::FP_ROUND, DL, XScalarTy, Final,
+                        DAG.getIntPtrConstant(1, SDLoc(Op)));
+  return Final;
+}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 8046f42736951cd..26a9d0e10d4578f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1700,6 +1700,8 @@ namespace llvm {
                         const SmallVectorImpl<SDValue> &OutVals,
                         const SDLoc &dl, SelectionDAG &DAG) const override;
 
+    SDValue LowerFLDEXP(SDValue Op, SelectionDAG &DAG) const;
+
     bool supportSplitCSR(MachineFunction *MF) const override {
       return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
diff --git a/llvm/test/CodeGen/X86/call-ldexp.ll b/llvm/test/CodeGen/X86/call-ldexp.ll
new file mode 100644
index 000000000000000..ed5abdb8968ca0c
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-ldexp.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=x86_64 -mattr=+avx512f < %s -o - | FileCheck %s
+
+define double @testExp(double %val, i32 %a) {
+; CHECK-LABEL: testExp:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vcvtsi2sd %edi, %xmm1, %xmm1
+; CHECK-NEXT:    vscalefsd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+entry:
+  %call = tail call fast double @ldexp(double %val, i32 %a)
+  ret double %call
+}
+
+declare double @ldexp(double, i32) #1
+
+define float @testExpf(float %val, i32 %a) {
+; CHECK-LABEL: testExpf:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vcvtsi2ss %edi, %xmm1, %xmm1
+; CHECK-NEXT:    vscalefss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+entry:
+  %call = tail call fast float @ldexpf(float %val, i32 %a)
+  ret float %call
+}
+
+declare float @ldexpf(float, i32) #1
+
+define fp128 @testExpl(fp128 %val, i32 %a) {
+; CHECK-LABEL: testExpl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    jmp ldexpl at PLT # TAILCALL
+entry:
+  %call = tail call fast fp128 @ldexpl(fp128 %val, i32 %a)
+  ret fp128 %call
+}
+
+declare fp128 @ldexpl(fp128, i32) #1
+
+define half @testExpf16(half %val, i32 %a) {
+; CHECK-LABEL: testExpf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpextrw $0, %xmm0, %eax
+; CHECK-NEXT:    vcvtsi2ss %edi, %xmm1, %xmm0
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm1
+; CHECK-NEXT:    vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT:    vscalefss %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; CHECK-NEXT:    vmovd %xmm0, %eax
+; CHECK-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+entry:
+  %0 = tail call fast half @llvm.ldexp.f16.i32(half %val, i32 %a)
+  ret half %0
+}
+
+declare half @llvm.ldexp.f16.i32(half, i32) #1
+
+attributes #1 = { mustprogress nofree nosync nounwind willreturn memory(none) }
+
diff --git a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
index 8d98ec7eaac2a4c..7c0b7e2aa8d2609 100644
--- a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
+++ b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll
@@ -79,38 +79,38 @@ define <4 x float> @fmul_pow2_ldexp_4xfloat(<4 x i32> %i) {
 ; CHECK-SSE-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-SSE-NEXT:    retq
 ;
-; CHECK-AVX-LABEL: fmul_pow2_ldexp_4xfloat:
-; CHECK-AVX:       # %bb.0:
-; CHECK-AVX-NEXT:    subq $40, %rsp
-; CHECK-AVX-NEXT:    .cfi_def_cfa_offset 48
-; CHECK-AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-AVX-NEXT:    vextractps $1, %xmm0, %edi
-; CHECK-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX-NEXT:    callq ldexpf at PLT
-; CHECK-AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-AVX-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-AVX-NEXT:    vmovd %xmm0, %edi
-; CHECK-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX-NEXT:    callq ldexpf at PLT
-; CHECK-AVX-NEXT:    vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; CHECK-AVX-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[2,3]
-; CHECK-AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-AVX-NEXT:    vextractps $2, %xmm0, %edi
-; CHECK-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX-NEXT:    callq ldexpf at PLT
-; CHECK-AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; CHECK-AVX-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; CHECK-AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-AVX-NEXT:    vextractps $3, %xmm0, %edi
-; CHECK-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX-NEXT:    callq ldexpf at PLT
-; CHECK-AVX-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
-; CHECK-AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; CHECK-AVX-NEXT:    addq $40, %rsp
-; CHECK-AVX-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-AVX-NEXT:    retq
+; CHECK-AVX2-LABEL: fmul_pow2_ldexp_4xfloat:
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    subq $40, %rsp
+; CHECK-AVX2-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-AVX2-NEXT:    vextractps $1, %xmm0, %edi
+; CHECK-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT:    callq ldexpf at PLT
+; CHECK-AVX2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-AVX2-NEXT:    vmovd %xmm0, %edi
+; CHECK-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT:    callq ldexpf at PLT
+; CHECK-AVX2-NEXT:    vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; CHECK-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; CHECK-AVX2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-AVX2-NEXT:    vextractps $2, %xmm0, %edi
+; CHECK-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT:    callq ldexpf at PLT
+; CHECK-AVX2-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-AVX2-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-AVX2-NEXT:    vextractps $3, %xmm0, %edi
+; CHECK-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX2-NEXT:    callq ldexpf at PLT
+; CHECK-AVX2-NEXT:    vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-AVX2-NEXT:    addq $40, %rsp
+; CHECK-AVX2-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-AVX2-NEXT:    retq
   %r = call <4 x float> @llvm.ldexp.v4f32.v4i32(<4 x float> <float 9.000000e+00, float 9.000000e+00, float 9.000000e+00, float 9.000000e+00>, <4 x i32> %i)
   ret <4 x float> %r
 }
@@ -648,88 +648,70 @@ define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) {
 ;
 ; CHECK-AVX512F-LABEL: fmul_pow2_ldexp_8xhalf:
 ; CHECK-AVX512F:       # %bb.0:
-; CHECK-AVX512F-NEXT:    subq $72, %rsp
-; CHECK-AVX512F-NEXT:    .cfi_def_cfa_offset 80
-; CHECK-AVX512F-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; CHECK-AVX512F-NEXT:    vpextrw $7, %xmm0, %edi
-; CHECK-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX512F-NEXT:    callq ldexpf at PLT
-; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovd %xmm0, %eax
-; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-AVX512F-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; CHECK-AVX512F-NEXT:    vpextrw $6, %xmm0, %edi
-; CHECK-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX512F-NEXT:    callq ldexpf at PLT
-; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovd %xmm0, %eax
-; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; CHECK-AVX512F-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; CHECK-AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-AVX512F-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; CHECK-AVX512F-NEXT:    vpextrw $5, %xmm0, %edi
-; CHECK-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX512F-NEXT:    callq ldexpf at PLT
-; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovd %xmm0, %eax
-; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-AVX512F-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; CHECK-AVX512F-NEXT:    vpextrw $4, %xmm0, %edi
-; CHECK-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX512F-NEXT:    callq ldexpf at PLT
-; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovd %xmm0, %eax
-; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; CHECK-AVX512F-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; CHECK-AVX512F-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; CHECK-AVX512F-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; CHECK-AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-AVX512F-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; CHECK-AVX512F-NEXT:    vpextrw $3, %xmm0, %edi
-; CHECK-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX512F-NEXT:    callq ldexpf at PLT
-; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovd %xmm0, %eax
-; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-AVX512F-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; CHECK-AVX512F-NEXT:    vpextrw $2, %xmm0, %edi
-; CHECK-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX512F-NEXT:    callq ldexpf at PLT
-; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovd %xmm0, %eax
-; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; CHECK-AVX512F-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; CHECK-AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-AVX512F-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; CHECK-AVX512F-NEXT:    vpextrw $1, %xmm0, %edi
-; CHECK-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX512F-NEXT:    callq ldexpf at PLT
-; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovd %xmm0, %eax
-; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-AVX512F-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; CHECK-AVX512F-NEXT:    vpextrw $7, %xmm0, %eax
+; CHECK-AVX512F-NEXT:    cwtl
+; CHECK-AVX512F-NEXT:    vcvtsi2ss %eax, %xmm1, %xmm2
+; CHECK-AVX512F-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-AVX512F-NEXT:    vscalefss %xmm2, %xmm1, %xmm2
+; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; CHECK-AVX512F-NEXT:    vmovd %xmm2, %eax
+; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; CHECK-AVX512F-NEXT:    vpextrw $6, %xmm0, %eax
+; CHECK-AVX512F-NEXT:    cwtl
+; CHECK-AVX512F-NEXT:    vcvtsi2ss %eax, %xmm3, %xmm3
+; CHECK-AVX512F-NEXT:    vscalefss %xmm3, %xmm1, %xmm3
+; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; CHECK-AVX512F-NEXT:    vmovd %xmm3, %eax
+; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; CHECK-AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; CHECK-AVX512F-NEXT:    vpextrw $5, %xmm0, %eax
+; CHECK-AVX512F-NEXT:    cwtl
+; CHECK-AVX512F-NEXT:    vcvtsi2ss %eax, %xmm4, %xmm3
+; CHECK-AVX512F-NEXT:    vscalefss %xmm3, %xmm1, %xmm3
+; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; CHECK-AVX512F-NEXT:    vmovd %xmm3, %eax
+; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; CHECK-AVX512F-NEXT:    vpextrw $4, %xmm0, %eax
+; CHECK-AVX512F-NEXT:    cwtl
+; CHECK-AVX512F-NEXT:    vcvtsi2ss %eax, %xmm4, %xmm4
+; CHECK-AVX512F-NEXT:    vscalefss %xmm4, %xmm1, %xmm4
+; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; CHECK-AVX512F-NEXT:    vmovd %xmm4, %eax
+; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; CHECK-AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; CHECK-AVX512F-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; CHECK-AVX512F-NEXT:    vpextrw $3, %xmm0, %eax
+; CHECK-AVX512F-NEXT:    cwtl
+; CHECK-AVX512F-NEXT:    vcvtsi2ss %eax, %xmm5, %xmm3
+; CHECK-AVX512F-NEXT:    vscalefss %xmm3, %xmm1, %xmm3
+; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; CHECK-AVX512F-NEXT:    vmovd %xmm3, %eax
+; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; CHECK-AVX512F-NEXT:    vpextrw $2, %xmm0, %eax
+; CHECK-AVX512F-NEXT:    cwtl
+; CHECK-AVX512F-NEXT:    vcvtsi2ss %eax, %xmm5, %xmm4
+; CHECK-AVX512F-NEXT:    vscalefss %xmm4, %xmm1, %xmm4
+; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; CHECK-AVX512F-NEXT:    vmovd %xmm4, %eax
+; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; CHECK-AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; CHECK-AVX512F-NEXT:    vpextrw $1, %xmm0, %eax
+; CHECK-AVX512F-NEXT:    cwtl
+; CHECK-AVX512F-NEXT:    vcvtsi2ss %eax, %xmm5, %xmm4
+; CHECK-AVX512F-NEXT:    vscalefss %xmm4, %xmm1, %xmm4
+; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; CHECK-AVX512F-NEXT:    vmovd %xmm4, %eax
+; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
 ; CHECK-AVX512F-NEXT:    vmovd %xmm0, %eax
-; CHECK-AVX512F-NEXT:    movzwl %ax, %edi
-; CHECK-AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-AVX512F-NEXT:    callq ldexpf at PLT
+; CHECK-AVX512F-NEXT:    cwtl
+; CHECK-AVX512F-NEXT:    vcvtsi2ss %eax, %xmm5, %xmm0
+; CHECK-AVX512F-NEXT:    vscalefss %xmm0, %xmm1, %xmm0
 ; CHECK-AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; CHECK-AVX512F-NEXT:    vmovd %xmm0, %eax
 ; CHECK-AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; CHECK-AVX512F-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; CHECK-AVX512F-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; CHECK-AVX512F-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; CHECK-AVX512F-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; CHECK-AVX512F-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; CHECK-AVX512F-NEXT:    # xmm0 = xmm0[0],mem[0]
-; CHECK-AVX512F-NEXT:    addq $72, %rsp
-; CHECK-AVX512F-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; CHECK-AVX512F-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; CHECK-AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; CHECK-AVX512F-NEXT:    retq
   %r = call <8 x half> @llvm.ldexp.v8f16.v8i16(<8 x half> <half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000>, <8 x i16> %i)
   ret <8 x half> %r


