[llvm] [RISCV] Use a vector MemVT when converting store+extractelt into a vector store. (PR #190107)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 2 09:20:19 PDT 2026
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/190107
>From 5bbb8ac5a7460ba7b7d5d2f203728878030e5c4c Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 1 Apr 2026 18:00:27 -0700
Subject: [PATCH 1/2] Pre-commit test
---
llvm/test/CodeGen/RISCV/rvv/pr189037.ll | 14 ++++++++++++++
1 file changed, 14 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/pr189037.ll
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr189037.ll b/llvm/test/CodeGen/RISCV/rvv/pr189037.ll
new file mode 100644
index 0000000000000..21d35f58377d9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/pr189037.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+unaligned-scalar-mem | FileCheck %s
+
+; We should produce a vse8 due to the align 1
+define void @test(ptr %out, <1 x i16> %v) {
+; CHECK-LABEL: test:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %coerce.val.ii.i = extractelement <1 x i16> %v, i64 0
+ store i16 %coerce.val.ii.i, ptr %out, align 1
+ ret void
+}
>From 7033eb67ba83f34d09c815ad5cc09ec316a3a335 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 1 Apr 2026 21:39:59 -0700
Subject: [PATCH 2/2] [RISCV] Use a vector MemVT when converting
store+extractelt into a vector store.
This is needed so that we check for unaligned vector memory
support instead of unaligned scalar memory support.
While there remove incorrect setting of the truncating store flag
on the vector instruction. And restrict the transform to simple stores
since we don't have tests for volatile or atomic.
Fixes #189037
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 10 ++++++----
llvm/test/CodeGen/RISCV/rvv/pr189037.ll | 4 ++--
2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index fff62837ee310..d137794c0450f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22435,16 +22435,18 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Src = Val.getOperand(0);
MVT VecVT = Src.getSimpleValueType();
// VecVT should be scalable and memory VT should match the element type.
- if (!Store->isIndexed() && VecVT.isScalableVector() &&
- MemVT == VecVT.getVectorElementType()) {
+ if (!Store->isIndexed() && Store->isSimple() &&
+ VecVT.isScalableVector() && MemVT == VecVT.getVectorElementType()) {
SDLoc DL(N);
MVT MaskVT = getMaskTypeFor(VecVT);
+ // Create a vector memory VT so allowsMisalignedMemoryAccesses will
+ // work correctly.
+ MemVT = EVT::getVectorVT(*DAG.getContext(), MemVT, 1);
return DAG.getStoreVP(
Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
DAG.getConstant(1, DL, MaskVT),
DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
- Store->getMemOperand(), Store->getAddressingMode(),
- Store->isTruncatingStore(), /*IsCompress*/ false);
+ Store->getMemOperand(), Store->getAddressingMode());
}
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr189037.ll b/llvm/test/CodeGen/RISCV/rvv/pr189037.ll
index 21d35f58377d9..b0165d2073f13 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr189037.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr189037.ll
@@ -5,8 +5,8 @@
define void @test(ptr %out, <1 x i16> %v) {
; CHECK-LABEL: test:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%coerce.val.ii.i = extractelement <1 x i16> %v, i64 0
store i16 %coerce.val.ii.i, ptr %out, align 1
More information about the llvm-commits
mailing list