[llvm] [DAGCombiner] Forward vector store to vector load with extract_subvector (PR #145707)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 25 07:04:21 PDT 2025


https://github.com/joe-rivos created https://github.com/llvm/llvm-project/pull/145707

Loading a smaller fixed vector type from a stored larger fixed vector type
can be substituted with an extract_subvector, provided the smaller type is
entirely contained in the larger type and the extract_subvector would be
legal for the given offset.

Granted, the result for RISC-V is the same number of instructions, but we avoid the loads.
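
For reference, here is a minimal standalone sketch of the legality
arithmetic (the canForward name and flattened signature are my own, not
part of the patch; the real combine also requires matching vector element
types and a plain, non-extending load paired with a non-truncating store):

  #include <cassert>
  #include <cstdio>

  // Can a load of LdNumElts elements, EltBytes each, at OffsetBytes into a
  // stored vector of StNumElts such elements be forwarded as an
  // extract_subvector?
  bool canForward(unsigned OffsetBytes, unsigned EltBytes,
                  unsigned LdNumElts, unsigned StNumElts) {
    // The loaded range must lie entirely within the stored value...
    if (OffsetBytes + LdNumElts * EltBytes > StNumElts * EltBytes)
      return false;
    // ...start on an element boundary...
    if (OffsetBytes % EltBytes != 0)
      return false;
    // ...and its element index must be a multiple of the result's element
    // count, as ISD::EXTRACT_SUBVECTOR requires.
    return (OffsetBytes / EltBytes) % LdNumElts == 0;
  }

  int main() {
    // forward_store: <8 x half> loads at byte offsets 16/32/48 of a
    // <32 x half> store give extract indices 8/16/24, all multiples of 8.
    assert(canForward(16, 2, 8, 32));
    assert(canForward(48, 2, 8, 32));
    // no_forward_store: byte offset 8 is element index 4, not a multiple
    // of 8, and byte offset 64 runs past the end of the stored value.
    assert(!canForward(8, 2, 8, 32));
    assert(!canForward(64, 2, 8, 32));
    printf("all checks behave as expected\n");
  }

In the first test below, the three forwarded loads accordingly become
vslidedown.vi with indices 8, 16 and 24 in the updated CHECK lines.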

From 3568e786ea44f492f97eb7816111b7cbc8377a77 Mon Sep 17 00:00:00 2001
From: Joseph Faulls <jofa at rivosinc.com>
Date: Wed, 25 Jun 2025 13:44:33 +0000
Subject: [PATCH 1/2] [DAGCombiner] Add pre-commit test for forwarding vector
 store

---
 llvm/test/CodeGen/RISCV/forward-vec-store.ll | 66 ++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/forward-vec-store.ll

diff --git a/llvm/test/CodeGen/RISCV/forward-vec-store.ll b/llvm/test/CodeGen/RISCV/forward-vec-store.ll
new file mode 100644
index 0000000000000..653c2f0089ddc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/forward-vec-store.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh -o - %s | FileCheck %s
+
+define void @forward_store(<32 x half> %halves, ptr %p, ptr %p2, ptr %p3, ptr %p4) {
+; CHECK-LABEL: forward_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a4, a0, 16
+; CHECK-NEXT:    li a5, 32
+; CHECK-NEXT:    vsetvli zero, a5, e16, m4, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    addi a5, a0, 32
+; CHECK-NEXT:    addi a0, a0, 48
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a4)
+; CHECK-NEXT:    vle16.v v9, (a5)
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    vse16.v v9, (a2)
+; CHECK-NEXT:    vse16.v v10, (a3)
+; CHECK-NEXT:    ret
+  store <32 x half> %halves, ptr %p, align 256
+  %gep1 = getelementptr inbounds nuw i8, ptr %p, i32 16
+  %gep2 = getelementptr inbounds nuw i8, ptr %p, i32 32
+  %gep3 = getelementptr inbounds nuw i8, ptr %p, i32 48
+  %ld1 = load <8 x half>, ptr %gep1, align 4
+  %ld2 = load <8 x half>, ptr %gep2, align 4
+  %ld3 = load <8 x half>, ptr %gep3, align 4
+  store <8 x half> %ld1, ptr %p2
+  store <8 x half> %ld2, ptr %p3
+  store <8 x half> %ld3, ptr %p4
+  ret void
+}
+
+define void @no_forward_store(<32 x half> %halves, ptr %p, ptr %p2, ptr %p3, ptr %p4) {
+; CHECK-LABEL: no_forward_store:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a4, a0, 8
+; CHECK-NEXT:    li a5, 32
+; CHECK-NEXT:    vsetvli zero, a5, e16, m4, ta, ma
+; CHECK-NEXT:    vse16.v v8, (a0)
+; CHECK-NEXT:    addi a5, a0, 16
+; CHECK-NEXT:    addi a0, a0, 64
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a4)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v9, (a5)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vse32.v v9, (a2)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vse16.v v10, (a3)
+; CHECK-NEXT:    ret
+  store <32 x half> %halves, ptr %p, align 256
+  %gep1 = getelementptr inbounds nuw i8, ptr %p, i32 8
+  %gep2 = getelementptr inbounds nuw i8, ptr %p, i32 16
+  %gep3 = getelementptr inbounds nuw i8, ptr %p, i32 64
+  %ld1 = load <8 x half>, ptr %gep1, align 4
+  %ld2 = load <4 x i32>, ptr %gep2, align 4
+  %ld3 = load <8 x half>, ptr %gep3, align 4
+  store <8 x half> %ld1, ptr %p2
+  store <4 x i32> %ld2, ptr %p3
+  store <8 x half> %ld3, ptr %p4
+  ret void
+}

From 7f8bd0e194456cad59d66b5abef00e38ac19af49 Mon Sep 17 00:00:00 2001
From: Joseph Faulls <jofa at rivosinc.com>
Date: Wed, 25 Jun 2025 13:53:20 +0000
Subject: [PATCH 2/2] [DAGCombiner] Forward vector store to vector load with
 extract_subvector

Loading a smaller fixed vector type from a stored larger fixed vector type
can be substituted with an extract_subvector, provided the smaller type is
entirely contained in the larger type and the extract_subvector would be
legal for the given offset.
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 21 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/forward-vec-store.ll  | 22 +++++++++----------
 2 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 91f696e8fe88e..6c213fd6de268 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -19913,6 +19913,27 @@ SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {
     }
   }
 
+  // Loading a smaller fixed vector type from a stored larger fixed vector
+  // type can be substituted with an extract_subvector, provided the smaller
+  // type is entirely contained in the larger type and the extract_subvector
+  // would be legal for the given offset.
+  if (TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, LDType) &&
+      LDType.isFixedLengthVector() && STType.isFixedLengthVector() &&
+      !ST->isTruncatingStore() && LD->getExtensionType() == ISD::NON_EXTLOAD &&
+      LDType.getVectorElementType() == STType.getVectorElementType() &&
+      (Offset * 8 + LDType.getFixedSizeInBits() <=
+       STType.getFixedSizeInBits()) &&
+      (Offset % LDType.getScalarStoreSize() == 0)) {
+    unsigned EltOffset = Offset / LDType.getScalarStoreSize();
+    // The extract index must be a multiple of the result's element count.
+    if (EltOffset % LDType.getVectorElementCount().getFixedValue() == 0) {
+      SDLoc DL(LD);
+      SDValue VecIdx = DAG.getVectorIdxConstant(EltOffset, DL);
+      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LDType, Val, VecIdx);
+      return ReplaceLd(LD, Val, Chain);
+    }
+  }
+
   // TODO: Deal with nonzero offset.
   if (LD->getBasePtr().isUndef() || Offset != 0)
     return SDValue();
diff --git a/llvm/test/CodeGen/RISCV/forward-vec-store.ll b/llvm/test/CodeGen/RISCV/forward-vec-store.ll
index 653c2f0089ddc..e8dad81eb1e47 100644
--- a/llvm/test/CodeGen/RISCV/forward-vec-store.ll
+++ b/llvm/test/CodeGen/RISCV/forward-vec-store.ll
@@ -4,19 +4,19 @@
 define void @forward_store(<32 x half> %halves, ptr %p, ptr %p2, ptr %p3, ptr %p4) {
 ; CHECK-LABEL: forward_store:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a4, a0, 16
-; CHECK-NEXT:    li a5, 32
-; CHECK-NEXT:    vsetvli zero, a5, e16, m4, ta, ma
+; CHECK-NEXT:    li a4, 32
+; CHECK-NEXT:    vsetivli zero, 8, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v16, v8, 8
+; CHECK-NEXT:    vsetivli zero, 8, e16, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v12, v8, 16
+; CHECK-NEXT:    vsetvli zero, a4, e16, m4, ta, ma
 ; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    addi a5, a0, 32
-; CHECK-NEXT:    addi a0, a0, 48
+; CHECK-NEXT:    vsetivli zero, 8, e16, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 24
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a4)
-; CHECK-NEXT:    vle16.v v9, (a5)
-; CHECK-NEXT:    vle16.v v10, (a0)
-; CHECK-NEXT:    vse16.v v8, (a1)
-; CHECK-NEXT:    vse16.v v9, (a2)
-; CHECK-NEXT:    vse16.v v10, (a3)
+; CHECK-NEXT:    vse16.v v16, (a1)
+; CHECK-NEXT:    vse16.v v12, (a2)
+; CHECK-NEXT:    vse16.v v8, (a3)
 ; CHECK-NEXT:    ret
   store <32 x half> %halves, ptr %p, align 256
   %gep1 = getelementptr inbounds nuw i8, ptr %p, i32 16


