[llvm] fff3e1d - [x86] enable fast sqrtss/sqrtps tuning for AMD Zen cores

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 4 10:59:29 PST 2022


Author: Sanjay Patel
Date: 2022-02-04T13:59:20-05:00
New Revision: fff3e1dbaa9ee2d91dc15b39defa88346f03a4c2

URL: https://github.com/llvm/llvm-project/commit/fff3e1dbaa9ee2d91dc15b39defa88346f03a4c2
DIFF: https://github.com/llvm/llvm-project/commit/fff3e1dbaa9ee2d91dc15b39defa88346f03a4c2.diff

LOG: [x86] enable fast sqrtss/sqrtps tuning for AMD Zen cores

As discussed in D118534, all of the recent AMD CPUs have
relatively fast (<14 cycle latency) "sqrtss" and "sqrtps"
instructions:
https://uops.info/table.html?search=sqrtps&cb_lat=on&cb_tp=on&cb_SNB=on&cb_SKL=on&cb_ZENp=on&cb_ZEN2=on&cb_ZEN3=on&cb_measurements=on&cb_avx=on&cb_sse=on

So we should set this tuning flag to alter codegen of plain
"sqrt(X)" expansion (as opposed to reciprocal-sqrt - there
is other test coverage for that pattern). The expansion is
both slower and less accurate than the hardware instruction.

Differential Revision: https://reviews.llvm.org/D119001

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86.td
    llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
index 380507308c3dd..bafba2ee09c37 100644
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -1169,6 +1169,8 @@ def ProcessorFeatures {
                                      TuningFastBEXTR,
                                      TuningFast15ByteNOP,
                                      TuningBranchFusion,
+                                     TuningFastScalarFSQRT,
+                                     TuningFastVectorFSQRT,
                                      TuningFastScalarShiftMasks,
                                      TuningFastMOVBE,
                                      TuningSlowSHLD,

diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll b/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll
index ab97dd6f4b33e..6d2fbe0364dbd 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath-tune.ll
@@ -2,9 +2,9 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mcpu=nehalem     | FileCheck %s --check-prefixes=NHM
 ; RUN: llc < %s -mtriple=x86_64-- -mcpu=sandybridge | FileCheck %s --check-prefixes=FAST-SCALAR,SNB
 ; RUN: llc < %s -mtriple=x86_64-- -mcpu=broadwell   | FileCheck %s --check-prefixes=FAST-SCALAR,BDW
-; RUN: llc < %s -mtriple=x86_64-- -mcpu=skylake     | FileCheck %s --check-prefixes=FAST-SCALAR,SKL
-; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver1      | FileCheck %s --check-prefixes=SLOW-SCALAR,ZN1
-; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver3      | FileCheck %s --check-prefixes=SLOW-SCALAR,ZN3
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=skylake     | FileCheck %s --check-prefixes=FAST-SCALAR,FAST-VECTOR
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver1      | FileCheck %s --check-prefixes=FAST-SCALAR,FAST-VECTOR
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver3      | FileCheck %s --check-prefixes=FAST-SCALAR,FAST-VECTOR
 
 define float @f32_no_daz(float %f) #0 {
 ; NHM-LABEL: f32_no_daz:
@@ -26,19 +26,6 @@ define float @f32_no_daz(float %f) #0 {
 ; FAST-SCALAR:       # %bb.0:
 ; FAST-SCALAR-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; FAST-SCALAR-NEXT:    retq
-;
-; SLOW-SCALAR-LABEL: f32_no_daz:
-; SLOW-SCALAR:       # %bb.0:
-; SLOW-SCALAR-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm1
-; SLOW-SCALAR-NEXT:    vbroadcastss {{.*#+}} xmm3 = [NaN,NaN,NaN,NaN]
-; SLOW-SCALAR-NEXT:    vmulss %xmm1, %xmm0, %xmm2
-; SLOW-SCALAR-NEXT:    vfmadd213ss {{.*#+}} xmm1 = (xmm2 * xmm1) + mem
-; SLOW-SCALAR-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; SLOW-SCALAR-NEXT:    vandps %xmm3, %xmm0, %xmm0
-; SLOW-SCALAR-NEXT:    vcmpltss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; SLOW-SCALAR-NEXT:    vmulss %xmm1, %xmm2, %xmm1
-; SLOW-SCALAR-NEXT:    vandnps %xmm1, %xmm0, %xmm0
-; SLOW-SCALAR-NEXT:    retq
   %call = tail call fast float @llvm.sqrt.f32(float %f) #2
   ret float %call
 }
@@ -91,42 +78,10 @@ define <4 x float> @v4f32_no_daz(<4 x float> %f) #0 {
 ; BDW-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; BDW-NEXT:    retq
 ;
-; SKL-LABEL: v4f32_no_daz:
-; SKL:       # %bb.0:
-; SKL-NEXT:    vsqrtps %xmm0, %xmm0
-; SKL-NEXT:    retq
-;
-; ZN1-LABEL: v4f32_no_daz:
-; ZN1:       # %bb.0:
-; ZN1-NEXT:    vrsqrtps %xmm0, %xmm1
-; ZN1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; ZN1-NEXT:    vbroadcastss {{.*#+}} xmm4 = [NaN,NaN,NaN,NaN]
-; ZN1-NEXT:    vmulps %xmm1, %xmm0, %xmm2
-; ZN1-NEXT:    vfmadd231ps {{.*#+}} xmm3 = (xmm2 * xmm1) + xmm3
-; ZN1-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; ZN1-NEXT:    vandps %xmm4, %xmm0, %xmm0
-; ZN1-NEXT:    vmulps %xmm1, %xmm2, %xmm1
-; ZN1-NEXT:    vmulps %xmm3, %xmm1, %xmm1
-; ZN1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
-; ZN1-NEXT:    vcmpleps %xmm0, %xmm3, %xmm0
-; ZN1-NEXT:    vandps %xmm1, %xmm0, %xmm0
-; ZN1-NEXT:    retq
-;
-; ZN3-LABEL: v4f32_no_daz:
-; ZN3:       # %bb.0:
-; ZN3-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; ZN3-NEXT:    vrsqrtps %xmm0, %xmm1
-; ZN3-NEXT:    vbroadcastss {{.*#+}} xmm4 = [NaN,NaN,NaN,NaN]
-; ZN3-NEXT:    vmulps %xmm1, %xmm0, %xmm2
-; ZN3-NEXT:    vfmadd231ps {{.*#+}} xmm3 = (xmm2 * xmm1) + xmm3
-; ZN3-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; ZN3-NEXT:    vandps %xmm4, %xmm0, %xmm0
-; ZN3-NEXT:    vmulps %xmm1, %xmm2, %xmm1
-; ZN3-NEXT:    vmulps %xmm3, %xmm1, %xmm1
-; ZN3-NEXT:    vbroadcastss {{.*#+}} xmm3 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
-; ZN3-NEXT:    vcmpleps %xmm0, %xmm3, %xmm0
-; ZN3-NEXT:    vandps %xmm1, %xmm0, %xmm0
-; ZN3-NEXT:    retq
+; FAST-VECTOR-LABEL: v4f32_no_daz:
+; FAST-VECTOR:       # %bb.0:
+; FAST-VECTOR-NEXT:    vsqrtps %xmm0, %xmm0
+; FAST-VECTOR-NEXT:    retq
   %call = tail call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %f) #2
   ret <4 x float> %call
 }
@@ -194,42 +149,10 @@ define <8 x float> @v8f32_no_daz(<8 x float> %f) #0 {
 ; BDW-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; BDW-NEXT:    retq
 ;
-; SKL-LABEL: v8f32_no_daz:
-; SKL:       # %bb.0:
-; SKL-NEXT:    vsqrtps %ymm0, %ymm0
-; SKL-NEXT:    retq
-;
-; ZN1-LABEL: v8f32_no_daz:
-; ZN1:       # %bb.0:
-; ZN1-NEXT:    vrsqrtps %ymm0, %ymm1
-; ZN1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; ZN1-NEXT:    vbroadcastss {{.*#+}} ymm4 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; ZN1-NEXT:    vmulps %ymm1, %ymm0, %ymm2
-; ZN1-NEXT:    vandps %ymm4, %ymm0, %ymm0
-; ZN1-NEXT:    vfmadd231ps {{.*#+}} ymm3 = (ymm2 * ymm1) + ymm3
-; ZN1-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; ZN1-NEXT:    vmulps %ymm1, %ymm2, %ymm1
-; ZN1-NEXT:    vmulps %ymm3, %ymm1, %ymm1
-; ZN1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
-; ZN1-NEXT:    vcmpleps %ymm0, %ymm3, %ymm0
-; ZN1-NEXT:    vandps %ymm1, %ymm0, %ymm0
-; ZN1-NEXT:    retq
-;
-; ZN3-LABEL: v8f32_no_daz:
-; ZN3:       # %bb.0:
-; ZN3-NEXT:    vbroadcastss {{.*#+}} ymm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; ZN3-NEXT:    vrsqrtps %ymm0, %ymm1
-; ZN3-NEXT:    vbroadcastss {{.*#+}} ymm4 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; ZN3-NEXT:    vmulps %ymm1, %ymm0, %ymm2
-; ZN3-NEXT:    vfmadd231ps {{.*#+}} ymm3 = (ymm2 * ymm1) + ymm3
-; ZN3-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; ZN3-NEXT:    vandps %ymm4, %ymm0, %ymm0
-; ZN3-NEXT:    vmulps %ymm1, %ymm2, %ymm1
-; ZN3-NEXT:    vmulps %ymm3, %ymm1, %ymm1
-; ZN3-NEXT:    vbroadcastss {{.*#+}} ymm3 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
-; ZN3-NEXT:    vcmpleps %ymm0, %ymm3, %ymm0
-; ZN3-NEXT:    vandps %ymm1, %ymm0, %ymm0
-; ZN3-NEXT:    retq
+; FAST-VECTOR-LABEL: v8f32_no_daz:
+; FAST-VECTOR:       # %bb.0:
+; FAST-VECTOR-NEXT:    vsqrtps %ymm0, %ymm0
+; FAST-VECTOR-NEXT:    retq
   %call = tail call fast <8 x float> @llvm.sqrt.v8f32(<8 x float> %f) #2
   ret <8 x float> %call
 }
@@ -256,18 +179,6 @@ define float @f32_daz(float %f) #1 {
 ; FAST-SCALAR:       # %bb.0:
 ; FAST-SCALAR-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; FAST-SCALAR-NEXT:    retq
-;
-; SLOW-SCALAR-LABEL: f32_daz:
-; SLOW-SCALAR:       # %bb.0:
-; SLOW-SCALAR-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm1
-; SLOW-SCALAR-NEXT:    vmulss %xmm1, %xmm0, %xmm2
-; SLOW-SCALAR-NEXT:    vfmadd213ss {{.*#+}} xmm1 = (xmm2 * xmm1) + mem
-; SLOW-SCALAR-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; SLOW-SCALAR-NEXT:    vmulss %xmm1, %xmm2, %xmm1
-; SLOW-SCALAR-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; SLOW-SCALAR-NEXT:    vcmpeqss %xmm2, %xmm0, %xmm0
-; SLOW-SCALAR-NEXT:    vandnps %xmm1, %xmm0, %xmm0
-; SLOW-SCALAR-NEXT:    retq
   %call = tail call fast float @llvm.sqrt.f32(float %f) #2
   ret float %call
 }
@@ -315,38 +226,10 @@ define <4 x float> @v4f32_daz(<4 x float> %f) #1 {
 ; BDW-NEXT:    vandps %xmm1, %xmm0, %xmm0
 ; BDW-NEXT:    retq
 ;
-; SKL-LABEL: v4f32_daz:
-; SKL:       # %bb.0:
-; SKL-NEXT:    vsqrtps %xmm0, %xmm0
-; SKL-NEXT:    retq
-;
-; ZN1-LABEL: v4f32_daz:
-; ZN1:       # %bb.0:
-; ZN1-NEXT:    vrsqrtps %xmm0, %xmm1
-; ZN1-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; ZN1-NEXT:    vmulps %xmm1, %xmm0, %xmm2
-; ZN1-NEXT:    vfmadd231ps {{.*#+}} xmm3 = (xmm2 * xmm1) + xmm3
-; ZN1-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; ZN1-NEXT:    vmulps %xmm1, %xmm2, %xmm1
-; ZN1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; ZN1-NEXT:    vcmpneqps %xmm2, %xmm0, %xmm0
-; ZN1-NEXT:    vmulps %xmm3, %xmm1, %xmm1
-; ZN1-NEXT:    vandps %xmm1, %xmm0, %xmm0
-; ZN1-NEXT:    retq
-;
-; ZN3-LABEL: v4f32_daz:
-; ZN3:       # %bb.0:
-; ZN3-NEXT:    vbroadcastss {{.*#+}} xmm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; ZN3-NEXT:    vrsqrtps %xmm0, %xmm1
-; ZN3-NEXT:    vmulps %xmm1, %xmm0, %xmm2
-; ZN3-NEXT:    vfmadd231ps {{.*#+}} xmm3 = (xmm2 * xmm1) + xmm3
-; ZN3-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; ZN3-NEXT:    vmulps %xmm1, %xmm2, %xmm1
-; ZN3-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; ZN3-NEXT:    vcmpneqps %xmm2, %xmm0, %xmm0
-; ZN3-NEXT:    vmulps %xmm3, %xmm1, %xmm1
-; ZN3-NEXT:    vandps %xmm1, %xmm0, %xmm0
-; ZN3-NEXT:    retq
+; FAST-VECTOR-LABEL: v4f32_daz:
+; FAST-VECTOR:       # %bb.0:
+; FAST-VECTOR-NEXT:    vsqrtps %xmm0, %xmm0
+; FAST-VECTOR-NEXT:    retq
   %call = tail call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %f) #2
   ret <4 x float> %call
 }
@@ -405,38 +288,10 @@ define <8 x float> @v8f32_daz(<8 x float> %f) #1 {
 ; BDW-NEXT:    vandps %ymm1, %ymm0, %ymm0
 ; BDW-NEXT:    retq
 ;
-; SKL-LABEL: v8f32_daz:
-; SKL:       # %bb.0:
-; SKL-NEXT:    vsqrtps %ymm0, %ymm0
-; SKL-NEXT:    retq
-;
-; ZN1-LABEL: v8f32_daz:
-; ZN1:       # %bb.0:
-; ZN1-NEXT:    vrsqrtps %ymm0, %ymm1
-; ZN1-NEXT:    vbroadcastss {{.*#+}} ymm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; ZN1-NEXT:    vmulps %ymm1, %ymm0, %ymm2
-; ZN1-NEXT:    vfmadd231ps {{.*#+}} ymm3 = (ymm2 * ymm1) + ymm3
-; ZN1-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; ZN1-NEXT:    vmulps %ymm1, %ymm2, %ymm1
-; ZN1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; ZN1-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm0
-; ZN1-NEXT:    vmulps %ymm3, %ymm1, %ymm1
-; ZN1-NEXT:    vandps %ymm1, %ymm0, %ymm0
-; ZN1-NEXT:    retq
-;
-; ZN3-LABEL: v8f32_daz:
-; ZN3:       # %bb.0:
-; ZN3-NEXT:    vbroadcastss {{.*#+}} ymm3 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
-; ZN3-NEXT:    vrsqrtps %ymm0, %ymm1
-; ZN3-NEXT:    vmulps %ymm1, %ymm0, %ymm2
-; ZN3-NEXT:    vfmadd231ps {{.*#+}} ymm3 = (ymm2 * ymm1) + ymm3
-; ZN3-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; ZN3-NEXT:    vmulps %ymm1, %ymm2, %ymm1
-; ZN3-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; ZN3-NEXT:    vcmpneqps %ymm2, %ymm0, %ymm0
-; ZN3-NEXT:    vmulps %ymm3, %ymm1, %ymm1
-; ZN3-NEXT:    vandps %ymm1, %ymm0, %ymm0
-; ZN3-NEXT:    retq
+; FAST-VECTOR-LABEL: v8f32_daz:
+; FAST-VECTOR:       # %bb.0:
+; FAST-VECTOR-NEXT:    vsqrtps %ymm0, %ymm0
+; FAST-VECTOR-NEXT:    retq
   %call = tail call fast <8 x float> @llvm.sqrt.v8f32(<8 x float> %f) #2
   ret <8 x float> %call
 }


        


More information about the llvm-commits mailing list