[llvm] 2c1a900 - [PGO][PGSO] Add profile guided size optimization to X86 ISel Lowering.

Hiroshi Yamauchi via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 9 10:44:01 PDT 2020


Author: Hiroshi Yamauchi
Date: 2020-07-09T10:43:45-07:00
New Revision: 2c1a9006dd73e4e9f446fb6fa6cf2328eee1558f

URL: https://github.com/llvm/llvm-project/commit/2c1a9006dd73e4e9f446fb6fa6cf2328eee1558f
DIFF: https://github.com/llvm/llvm-project/commit/2c1a9006dd73e4e9f446fb6fa6cf2328eee1558f.diff

LOG: [PGO][PGSO] Add profile guided size optimization to X86 ISel Lowering.

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/avx-vperm2x128.ll
    llvm/test/CodeGen/X86/phaddsub-extract.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index afb356e2cf96..cfbf38c56bc9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -34448,7 +34448,7 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
     return DAG.getBitcast(RootVT, V1);
   }
 
-  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
+  bool OptForSize = DAG.shouldOptForSize();
   unsigned RootSizeInBits = RootVT.getSizeInBits();
   unsigned NumRootElts = RootVT.getVectorNumElements();
   unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
@@ -39290,7 +39290,7 @@ static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
   }
 
   // Only use (F)HADD opcodes if they aren't microcoded or minimizes codesize.
-  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
+  bool OptForSize = DAG.shouldOptForSize();
   if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
     return SDValue();
 

diff --git a/llvm/test/CodeGen/X86/avx-vperm2x128.ll b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
index 8310cc9b50a3..26a5cd328d5c 100644
--- a/llvm/test/CodeGen/X86/avx-vperm2x128.ll
+++ b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
@@ -397,8 +397,7 @@ define <4 x double> @shuffle_v4f64_zz23_optsize(<4 x double> %a) optsize {
 define <4 x double> @shuffle_v4f64_zz23_pgso(<4 x double> %a) !prof !14 {
 ; ALL-LABEL: shuffle_v4f64_zz23_pgso:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[2,3]
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> %a, <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
   ret <4 x double> %s
@@ -441,8 +440,7 @@ define <4 x double> @shuffle_v4f64_zz67_optsize(<4 x double> %a) optsize {
 define <4 x double> @shuffle_v4f64_zz67_pgso(<4 x double> %a) !prof !14 {
 ; ALL-LABEL: shuffle_v4f64_zz67_pgso:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; ALL-NEXT:    vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[2,3]
 ; ALL-NEXT:    retq
   %s = shufflevector <4 x double> <double 0.0, double 0.0, double undef, double undef>, <4 x double> %a, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
   ret <4 x double> %s

diff --git a/llvm/test/CodeGen/X86/phaddsub-extract.ll b/llvm/test/CodeGen/X86/phaddsub-extract.ll
index f475a31b7d29..dd258c5f424a 100644
--- a/llvm/test/CodeGen/X86/phaddsub-extract.ll
+++ b/llvm/test/CodeGen/X86/phaddsub-extract.ll
@@ -2095,35 +2095,19 @@ define i32 @hadd32_4_optsize(<4 x i32> %x225) optsize {
 }
 
 define i32 @hadd32_4_pgso(<4 x i32> %x225) !prof !14 {
-; SSE3-SLOW-LABEL: hadd32_4_pgso:
-; SSE3-SLOW:       # %bb.0:
-; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE3-SLOW-NEXT:    paddd %xmm0, %xmm1
-; SSE3-SLOW-NEXT:    phaddd %xmm1, %xmm1
-; SSE3-SLOW-NEXT:    movd %xmm1, %eax
-; SSE3-SLOW-NEXT:    retq
-;
-; SSE3-FAST-LABEL: hadd32_4_pgso:
-; SSE3-FAST:       # %bb.0:
-; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
-; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
-; SSE3-FAST-NEXT:    movd %xmm0, %eax
-; SSE3-FAST-NEXT:    retq
-;
-; AVX-SLOW-LABEL: hadd32_4_pgso:
-; AVX-SLOW:       # %bb.0:
-; AVX-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX-SLOW-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; AVX-SLOW-NEXT:    vmovd %xmm0, %eax
-; AVX-SLOW-NEXT:    retq
+; SSE3-LABEL: hadd32_4_pgso:
+; SSE3:       # %bb.0:
+; SSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-NEXT:    movd %xmm0, %eax
+; SSE3-NEXT:    retq
 ;
-; AVX-FAST-LABEL: hadd32_4_pgso:
-; AVX-FAST:       # %bb.0:
-; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vmovd %xmm0, %eax
-; AVX-FAST-NEXT:    retq
+; AVX-LABEL: hadd32_4_pgso:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    retq
   %x226 = shufflevector <4 x i32> %x225, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
   %x227 = add <4 x i32> %x225, %x226
   %x228 = shufflevector <4 x i32> %x227, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>


        


More information about the llvm-commits mailing list