[llvm] [RISCV] Only convert volatile i64 load/store to Zilsd in SelectionDAG. (PR #169529)

via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 25 09:21:07 PST 2025


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-risc-v

Author: Craig Topper (topperc)

<details>
<summary>Changes</summary>

Rely on the ZilsdOptimizer pass to form Zilsd pairs for non-volatile loads/stores. This can avoid extra register moves.

Also continue to use Zilsd in SelectionDAG for non-volatile stores of constant 0, since storing zero does not constrain the register allocator.

---
Full diff: https://github.com/llvm/llvm-project/pull/169529.diff


2 Files Affected:

- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+7-2) 
- (modified) llvm/test/CodeGen/RISCV/zilsd.ll (+19-32) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 19a16197272fe..de60e8461e5d7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8404,7 +8404,12 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
       if (Store->isTruncatingStore())
         return SDValue();
 
-      if (Store->getAlign() < Subtarget.getZilsdAlign())
+      // Expand non-volatile or misaligned stores.
+      // Keep stores of 0 since that doesn't constrain the register allocator.
+      if (!(Store->isVolatile() ||
+            (isa<ConstantSDNode>(StoredVal) &&
+             cast<ConstantSDNode>(StoredVal)->isZero())) ||
+          Store->getAlign() < Subtarget.getZilsdAlign())
         return SDValue();
 
       SDLoc DL(Op);
@@ -14803,7 +14808,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
       assert(Subtarget.hasStdExtZilsd() && !Subtarget.is64Bit() &&
              "Unexpected custom legalisation");
 
-      if (Ld->getAlign() < Subtarget.getZilsdAlign())
+      if (!Ld->isVolatile() || Ld->getAlign() < Subtarget.getZilsdAlign())
         return;
 
       SDLoc DL(N);
diff --git a/llvm/test/CodeGen/RISCV/zilsd.ll b/llvm/test/CodeGen/RISCV/zilsd.ll
index 27b1ff76f6f05..427b1d83b9d8e 100644
--- a/llvm/test/CodeGen/RISCV/zilsd.ll
+++ b/llvm/test/CodeGen/RISCV/zilsd.ll
@@ -9,9 +9,10 @@
 define i64 @load(ptr %a) nounwind {
 ; CHECK-LABEL: load:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    mv a2, a0
-; CHECK-NEXT:    ld a0, 80(a0)
-; CHECK-NEXT:    ld zero, 0(a2)
+; CHECK-NEXT:    lw a2, 80(a0)
+; CHECK-NEXT:    lw a1, 84(a0)
+; CHECK-NEXT:    ld zero, 0(a0)
+; CHECK-NEXT:    mv a0, a2
 ; CHECK-NEXT:    ret
   %1 = getelementptr i64, ptr %a, i32 10
   %2 = load i64, ptr %1
@@ -44,10 +45,10 @@ define i64 @load_align4(ptr %a) nounwind {
 define void @store(ptr %a, i64 %b) nounwind {
 ; CHECK-LABEL: store:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    mv a2, a1
-; CHECK-NEXT:    sd a2, 0(a0)
-; CHECK-NEXT:    sd a2, 88(a0)
+; CHECK-NEXT:    sw a1, 0(a0)
+; CHECK-NEXT:    sw a2, 4(a0)
+; CHECK-NEXT:    sw a1, 88(a0)
+; CHECK-NEXT:    sw a2, 92(a0)
 ; CHECK-NEXT:    ret
   store i64 %b, ptr %a
   %1 = getelementptr i64, ptr %a, i32 11
@@ -56,25 +57,11 @@ define void @store(ptr %a, i64 %b) nounwind {
 }
 
 define void @store_align4(ptr %a, i64 %b) nounwind {
-; SLOW-LABEL: store_align4:
-; SLOW:       # %bb.0:
-; SLOW-NEXT:    sw a1, 88(a0)
-; SLOW-NEXT:    sw a2, 92(a0)
-; SLOW-NEXT:    ret
-;
-; FAST-LABEL: store_align4:
-; FAST:       # %bb.0:
-; FAST-NEXT:    mv a3, a2
-; FAST-NEXT:    mv a2, a1
-; FAST-NEXT:    sd a2, 88(a0)
-; FAST-NEXT:    ret
-;
-; 4BYTEALIGN-LABEL: store_align4:
-; 4BYTEALIGN:       # %bb.0:
-; 4BYTEALIGN-NEXT:    mv a3, a2
-; 4BYTEALIGN-NEXT:    mv a2, a1
-; 4BYTEALIGN-NEXT:    sd a2, 88(a0)
-; 4BYTEALIGN-NEXT:    ret
+; CHECK-LABEL: store_align4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sw a1, 88(a0)
+; CHECK-NEXT:    sw a2, 92(a0)
+; CHECK-NEXT:    ret
   %1 = getelementptr i64, ptr %a, i32 11
   store i64 %b, ptr %1, align 4
   ret void
@@ -158,9 +145,8 @@ define void @store_unaligned(ptr %p, i64 %v) {
 ;
 ; FAST-LABEL: store_unaligned:
 ; FAST:       # %bb.0:
-; FAST-NEXT:    mv a3, a2
-; FAST-NEXT:    mv a2, a1
-; FAST-NEXT:    sd a2, 0(a0)
+; FAST-NEXT:    sw a1, 0(a0)
+; FAST-NEXT:    sw a2, 4(a0)
 ; FAST-NEXT:    ret
 ;
 ; 4BYTEALIGN-LABEL: store_unaligned:
@@ -213,10 +199,11 @@ define void @large_offset(ptr nocapture %p, i64 %d) nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui a1, 4
 ; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    ld a2, -384(a0)
+; CHECK-NEXT:    lw a2, -384(a0)
+; CHECK-NEXT:    lw a1, -380(a0)
 ; CHECK-NEXT:    addi a2, a2, 1
-; CHECK-NEXT:    seqz a1, a2
-; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    seqz a3, a2
+; CHECK-NEXT:    add a3, a1, a3
 ; CHECK-NEXT:    sd a2, -384(a0)
 ; CHECK-NEXT:    ret
 entry:

``````````

</details>


https://github.com/llvm/llvm-project/pull/169529


More information about the llvm-commits mailing list