[llvm] c66b697 - [RISCV] Don't use zero-stride vector load if there's no optimized u-arch

via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 13 21:52:05 PST 2022


Author: wangpc
Date: 2022-11-14T13:51:30+08:00
New Revision: c66b69777cc9d6540dfa236ea4a4108864b2af10

URL: https://github.com/llvm/llvm-project/commit/c66b69777cc9d6540dfa236ea4a4108864b2af10
DIFF: https://github.com/llvm/llvm-project/commit/c66b69777cc9d6540dfa236ea4a4108864b2af10.diff

LOG: [RISCV] Don't use zero-stride vector load if there's no optimized u-arch

For strided vector load/store instructions, the RVV spec says:

> When rs2=x0, then an implementation is allowed, but not required, to
> perform fewer memory operations than the number of active elements, and
> may perform different numbers of memory operations across different
> dynamic executions of the same static instruction.

So the compiler shouldn't assume that fewer memory operations will be
performed when rs2=x0; on an implementation without this optimization, a
zero-stride load may still perform one memory access per active element.
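
For illustration, this is the splat-of-a-scalar-load pattern the fold in
RISCVDAGToDAGISel::Select applies to, together with the zero-stride form it
is selected to when the optimization is assumed (both taken from the updated
test below):

    ; IR: broadcast a loaded scalar into every lane of a scalable vector.
    %f = load float, float* %ptr
    %head = insertelement <vscale x 8 x float> poison, float %f, i32 0
    %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer

    ; Selected as a strided load with stride x0 (zero) when the u-arch is
    ; assumed to optimize it:
    ;   vsetvli  a1, zero, e32, m4, ta, ma
    ;   vlse32.v v8, (a0), zero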

We add a target feature to specify whether the u-arch supports optimized
zero-stride vector loads, and we perform the vector splat optimization only
if this feature is supported.

This feature is enabled by default since most designs implement this
optimization.
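
For reference, a u-arch that does not implement the optimization can opt out
through the new tune feature; a minimal llc invocation (mirroring the updated
RUN lines below) and the resulting lowering of the splat-of-load:

    ; llc -mtriple=riscv64 -mattr=+f,+d,+zfh,+experimental-zvfh,+v,+no-optimized-zero-stride-load ...
    ; With the fold disabled, the splat stays a scalar load plus a broadcast:
    ;   flw      ft0, 0(a0)
    ;   vsetvli  a0, zero, e32, m4, ta, ma
    ;   vfmv.v.f v8, ft0
    ;   ret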

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D137699

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCV.td
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVSubtarget.h
    llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
index 3813769a73dc..d54384c25c0d 100644
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -452,6 +452,11 @@ def FeatureUnalignedScalarMem
                       "true", "Has reasonably performant unaligned scalar "
                       "loads and stores">;
 
+def TuneNoOptimizedZeroStrideLoad
+   : SubtargetFeature<"no-optimized-zero-stride-load", "HasOptimizedZeroStrideLoad",
+                      "false", "Hasn't optimized (perform fewer memory operations)"
+                      "zero-stride vector load">;
+
 def TuneLUIADDIFusion
     : SubtargetFeature<"lui-addi-fusion", "HasLUIADDIFusion",
                        "true", "Enable LUI+ADDI macrofusion">;

diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 6e3a2a38b81e..16a0ca4c4956 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1790,6 +1790,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
   case RISCVISD::VFMV_S_F_VL:
   case RISCVISD::VMV_V_X_VL:
   case RISCVISD::VFMV_V_F_VL: {
+    // Only if we have optimized zero-stride vector load.
+    if (!Subtarget->hasOptimizedZeroStrideLoad())
+      break;
+
     // Try to match splat of a scalar load to a strided load with stride of x0.
     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;

diff  --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 456dc00999ab..f79f9b4bdd4e 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -101,6 +101,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool HasShortForwardBranchOpt = false;
   bool HasLUIADDIFusion = false;
   bool HasForcedAtomics = false;
+  bool HasOptimizedZeroStrideLoad = true;
   unsigned XLen = 32;
   unsigned ZvlLen = 0;
   MVT XLenVT = MVT::i32;
@@ -199,6 +200,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool enableUnalignedScalarMem() const { return EnableUnalignedScalarMem; }
   bool hasLUIADDIFusion() const { return HasLUIADDIFusion; }
   bool hasForcedAtomics() const { return HasForcedAtomics; }
+  bool hasOptimizedZeroStrideLoad() const { return HasOptimizedZeroStrideLoad; }
   MVT getXLenVT() const { return XLenVT; }
   unsigned getXLen() const { return XLen; }
   unsigned getFLen() const {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
index 81fc7329be16..879d06cfee1f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll
@@ -1,8 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+zfh,+experimental-zvfh,+v -target-abi ilp32d -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s
+; RUN:   | FileCheck %s --check-prefixes=CHECK,OPTIMIZED
 ; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+zfh,+experimental-zvfh,+v -target-abi lp64d -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s
+; RUN:   | FileCheck %s --check-prefixes=CHECK,OPTIMIZED
+; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+zfh,+experimental-zvfh,+v,+no-optimized-zero-stride-load -target-abi ilp32d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,NOT-OPTIMIZED
+; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+zfh,+experimental-zvfh,+v,+no-optimized-zero-stride-load -target-abi lp64d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK,NOT-OPTIMIZED
 
 define <vscale x 8 x half> @vsplat_nxv8f16(half %f) {
 ; CHECK-LABEL: vsplat_nxv8f16:
@@ -72,11 +76,18 @@ define <vscale x 8 x double> @vsplat_zero_nxv8f64() {
 
 ; Test that we fold this to a vlse with 0 stride.
 define <vscale x 8 x float> @vsplat_load_nxv8f32(float* %ptr) {
-; CHECK-LABEL: vsplat_load_nxv8f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vlse32.v v8, (a0), zero
-; CHECK-NEXT:    ret
+; OPTIMIZED-LABEL: vsplat_load_nxv8f32:
+; OPTIMIZED:       # %bb.0:
+; OPTIMIZED-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; OPTIMIZED-NEXT:    vlse32.v v8, (a0), zero
+; OPTIMIZED-NEXT:    ret
+;
+; NOT-OPTIMIZED-LABEL: vsplat_load_nxv8f32:
+; NOT-OPTIMIZED:       # %bb.0:
+; NOT-OPTIMIZED-NEXT:    flw ft0, 0(a0)
+; NOT-OPTIMIZED-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; NOT-OPTIMIZED-NEXT:    vfmv.v.f v8, ft0
+; NOT-OPTIMIZED-NEXT:    ret
   %f = load float, float* %ptr
   %head = insertelement <vscale x 8 x float> poison, float %f, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer


        

