[llvm] 8f04d81 - [SelectionDAG][RISCV] Mask constants to narrow size in TargetLowering::expandUnalignedStore.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 18 09:10:41 PDT 2023
Author: Craig Topper
Date: 2023-09-18T09:10:19-07:00
New Revision: 8f04d81ede8900b05c91c56e4db75f3e9a2cce25
URL: https://github.com/llvm/llvm-project/commit/8f04d81ede8900b05c91c56e4db75f3e9a2cce25
DIFF: https://github.com/llvm/llvm-project/commit/8f04d81ede8900b05c91c56e4db75f3e9a2cce25.diff
LOG: [SelectionDAG][RISCV] Mask constants to narrow size in TargetLowering::expandUnalignedStore.
If the SRL for Hi constant folds, but we don't remove those bits from
Lo, we can end up with strange constant folding through DAGCombine later.
I've only seen this with constants being lowered to constant pools
during lowering on RISC-V.
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/test/CodeGen/RISCV/unaligned-load-store.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 23c1486f711d727..aa367166e2a359e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -9558,6 +9558,14 @@ SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
SDValue ShiftAmount = DAG.getConstant(
NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout()));
SDValue Lo = Val;
+ // If Val is a constant, replace the upper bits with 0. The SRL will constant
+ // fold and not use the upper bits. A smaller constant may be easier to
+ // materialize.
+ if (auto *C = dyn_cast<ConstantSDNode>(Lo); C && !C->isOpaque())
+ Lo = DAG.getNode(
+ ISD::AND, dl, VT, Lo,
+ DAG.getConstant(APInt::getLowBitsSet(VT.getSizeInBits(), NumBits), dl,
+ VT));
SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
// Store the two parts
diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index 5626c353f1cd5af..ce0d8fedbfb88f2 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -416,57 +416,26 @@ define void @merge_stores_i32_i64(ptr %p) {
ret void
}
-; FIXME: We shouldn't generate multiple constant pools entries with shifted
-; values.
-;.LCPI0_0:
-; .quad 280223976814164 # 0xfedcba987654
-;.LCPI0_1:
-; .quad 71737338064426034 # 0xfedcba98765432
-;.LCPI0_2:
-; .quad -81985529216486896 # 0xfedcba9876543210
define void @store_large_constant(ptr %x) {
-; RV32I-LABEL: store_large_constant:
-; RV32I: # %bb.0:
-; RV32I-NEXT: li a1, 254
-; RV32I-NEXT: sb a1, 7(a0)
-; RV32I-NEXT: li a1, 220
-; RV32I-NEXT: sb a1, 6(a0)
-; RV32I-NEXT: li a1, 186
-; RV32I-NEXT: sb a1, 5(a0)
-; RV32I-NEXT: li a1, 152
-; RV32I-NEXT: sb a1, 4(a0)
-; RV32I-NEXT: li a1, 118
-; RV32I-NEXT: sb a1, 3(a0)
-; RV32I-NEXT: li a1, 84
-; RV32I-NEXT: sb a1, 2(a0)
-; RV32I-NEXT: li a1, 50
-; RV32I-NEXT: sb a1, 1(a0)
-; RV32I-NEXT: li a1, 16
-; RV32I-NEXT: sb a1, 0(a0)
-; RV32I-NEXT: ret
-;
-; RV64I-LABEL: store_large_constant:
-; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 254
-; RV64I-NEXT: sb a1, 7(a0)
-; RV64I-NEXT: li a1, 220
-; RV64I-NEXT: sb a1, 6(a0)
-; RV64I-NEXT: li a1, 186
-; RV64I-NEXT: sb a1, 5(a0)
-; RV64I-NEXT: li a1, 152
-; RV64I-NEXT: sb a1, 4(a0)
-; RV64I-NEXT: li a1, 118
-; RV64I-NEXT: lui a2, %hi(.LCPI16_0)
-; RV64I-NEXT: ld a2, %lo(.LCPI16_0)(a2)
-; RV64I-NEXT: lui a3, %hi(.LCPI16_1)
-; RV64I-NEXT: ld a3, %lo(.LCPI16_1)(a3)
-; RV64I-NEXT: lui a4, %hi(.LCPI16_2)
-; RV64I-NEXT: ld a4, %lo(.LCPI16_2)(a4)
-; RV64I-NEXT: sb a1, 3(a0)
-; RV64I-NEXT: sb a2, 2(a0)
-; RV64I-NEXT: sb a3, 1(a0)
-; RV64I-NEXT: sb a4, 0(a0)
-; RV64I-NEXT: ret
+; SLOW-LABEL: store_large_constant:
+; SLOW: # %bb.0:
+; SLOW-NEXT: li a1, 254
+; SLOW-NEXT: sb a1, 7(a0)
+; SLOW-NEXT: li a1, 220
+; SLOW-NEXT: sb a1, 6(a0)
+; SLOW-NEXT: li a1, 186
+; SLOW-NEXT: sb a1, 5(a0)
+; SLOW-NEXT: li a1, 152
+; SLOW-NEXT: sb a1, 4(a0)
+; SLOW-NEXT: li a1, 118
+; SLOW-NEXT: sb a1, 3(a0)
+; SLOW-NEXT: li a1, 84
+; SLOW-NEXT: sb a1, 2(a0)
+; SLOW-NEXT: li a1, 50
+; SLOW-NEXT: sb a1, 1(a0)
+; SLOW-NEXT: li a1, 16
+; SLOW-NEXT: sb a1, 0(a0)
+; SLOW-NEXT: ret
;
; RV32I-FAST-LABEL: store_large_constant:
; RV32I-FAST: # %bb.0:
More information about the llvm-commits
mailing list