[llvm] 4aac78d - [RISCV] Generalize existing SRA combine to fix #101040. (#101610)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 2 09:03:02 PDT 2024
Author: Craig Topper
Date: 2024-08-02T09:02:58-07:00
New Revision: 4aac78dd4a89f16657c162320e3a720437cb7284
URL: https://github.com/llvm/llvm-project/commit/4aac78dd4a89f16657c162320e3a720437cb7284
DIFF: https://github.com/llvm/llvm-project/commit/4aac78dd4a89f16657c162320e3a720437cb7284.diff
LOG: [RISCV] Generalize existing SRA combine to fix #101040. (#101610)
We already had a DAG combine for (sra (sext_inreg (shl X, C1), i32), C2)
-> (sra (shl X, C1+32), C2+32) that was used on RV64. This patch
generalizes it to other sext_inreg widths and enables it on both RV32 and RV64.
Fixes #101040.
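As a concrete illustration of the shift-amount arithmetic this combine performs, here is a minimal standalone C++ sketch (not LLVM code; the adjustShifts helper is invented for this example) that reproduces the constants checked in the rv32zbb/rv64zbb tests added below:

// (sra (sext_inreg (shl X, C1), i<ExtSize>), C2) on an XLen-bit target becomes
// (sra (shl X, C1 + (XLen - ExtSize)), C2 + (XLen - ExtSize)).
#include <cassert>
#include <cstdint>

struct ShiftPair { uint64_t Shl, Sra; };

static ShiftPair adjustShifts(uint64_t C1, uint64_t C2, unsigned ExtSize,
                              unsigned XLen) {
  unsigned Delta = XLen - ExtSize; // bits above the sign-extended field
  return {C1 + Delta, C2 + Delta};
}

int main() {
  // srai_slli in rv32zbb.ll: i16 field (ExtSize=16) on RV32, C1=9, C2=15
  // -> slli a0, a0, 25 ; srai a0, a0, 31.
  ShiftPair RV32 = adjustShifts(9, 15, 16, 32);
  assert(RV32.Shl == 25 && RV32.Sra == 31);
  // The same IR in rv64zbb.ll on RV64 -> slli a0, a0, 57 ; srai a0, a0, 63.
  ShiftPair RV64 = adjustShifts(9, 15, 16, 64);
  assert(RV64.Shl == 57 && RV64.Sra == 63);
  return 0;
}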
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rv32zbb.ll
llvm/test/CodeGen/RISCV/rv64zbb.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9b2037adbf342..4a2193c8d5328 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1468,8 +1468,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::MUL,
ISD::AND, ISD::OR, ISD::XOR, ISD::SETCC, ISD::SELECT});
- if (Subtarget.is64Bit())
- setTargetDAGCombine(ISD::SRA);
+ setTargetDAGCombine(ISD::SRA);
if (Subtarget.hasStdExtFOrZfinx())
setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM, ISD::FMUL});
@@ -15465,37 +15464,42 @@ static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
assert(N->getOpcode() == ISD::SRA && "Unexpected opcode");
- if (N->getValueType(0) != MVT::i64 || !Subtarget.is64Bit())
+ EVT VT = N->getValueType(0);
+
+ if (VT != Subtarget.getXLenVT())
return SDValue();
if (!isa<ConstantSDNode>(N->getOperand(1)))
return SDValue();
uint64_t ShAmt = N->getConstantOperandVal(1);
- if (ShAmt > 32)
- return SDValue();
SDValue N0 = N->getOperand(0);
- // Combine (sra (sext_inreg (shl X, C1), i32), C2) ->
- // (sra (shl X, C1+32), C2+32) so it gets selected as SLLI+SRAI instead of
- // SLLIW+SRAIW. SLLI+SRAI have compressed forms.
- if (ShAmt < 32 &&
- N0.getOpcode() == ISD::SIGN_EXTEND_INREG && N0.hasOneUse() &&
- cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32 &&
- N0.getOperand(0).getOpcode() == ISD::SHL && N0.getOperand(0).hasOneUse() &&
- isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
- uint64_t LShAmt = N0.getOperand(0).getConstantOperandVal(1);
- if (LShAmt < 32) {
- SDLoc ShlDL(N0.getOperand(0));
- SDValue Shl = DAG.getNode(ISD::SHL, ShlDL, MVT::i64,
- N0.getOperand(0).getOperand(0),
- DAG.getConstant(LShAmt + 32, ShlDL, MVT::i64));
- SDLoc DL(N);
- return DAG.getNode(ISD::SRA, DL, MVT::i64, Shl,
- DAG.getConstant(ShAmt + 32, DL, MVT::i64));
+ // Combine (sra (sext_inreg (shl X, C1), iX), C2) ->
+ // (sra (shl X, C1+(XLen-iX)), C2+(XLen-iX)) so it gets selected as SLLI+SRAI.
+ if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && N0.hasOneUse()) {
+ unsigned ExtSize =
+ cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
+ if (ShAmt < ExtSize && N0.getOperand(0).getOpcode() == ISD::SHL &&
+ N0.getOperand(0).hasOneUse() &&
+ isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
+ uint64_t LShAmt = N0.getOperand(0).getConstantOperandVal(1);
+ if (LShAmt < ExtSize) {
+ unsigned Size = VT.getSizeInBits();
+ SDLoc ShlDL(N0.getOperand(0));
+ SDValue Shl =
+ DAG.getNode(ISD::SHL, ShlDL, VT, N0.getOperand(0).getOperand(0),
+ DAG.getConstant(LShAmt + (Size - ExtSize), ShlDL, VT));
+ SDLoc DL(N);
+ return DAG.getNode(ISD::SRA, DL, VT, Shl,
+ DAG.getConstant(ShAmt + (Size - ExtSize), DL, VT));
+ }
}
}
+ if (ShAmt > 32 || VT != MVT::i64)
+ return SDValue();
+
// Combine (sra (shl X, 32), 32 - C) -> (shl (sext_inreg X, i32), C)
// FIXME: Should this be a generic combine? There's a similar combine on X86.
//
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index db100163b07b8..86e0d6b7b3f9d 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -1254,3 +1254,27 @@ define i64 @orc_b_i64(i64 %a) {
%2 = mul nuw i64 %1, 255
ret i64 %2
}
+
+define i32 @srai_slli(i16 signext %0) {
+; CHECK-LABEL: srai_slli:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 25
+; CHECK-NEXT: srai a0, a0, 31
+; CHECK-NEXT: ret
+ %2 = shl i16 %0, 9
+ %sext = ashr i16 %2, 15
+ %3 = sext i16 %sext to i32
+ ret i32 %3
+}
+
+define i32 @srai_slli2(i16 signext %0) {
+; CHECK-LABEL: srai_slli2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: slli a0, a0, 25
+; CHECK-NEXT: srai a0, a0, 30
+; CHECK-NEXT: ret
+ %2 = shl i16 %0, 9
+ %sext = ashr i16 %2, 14
+ %3 = sext i16 %sext to i32
+ ret i32 %3
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index d331a85589fca..53d7f77285e72 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -1476,3 +1476,61 @@ define i64 @orc_b_i64(i64 %a) {
%2 = mul nuw i64 %1, 255
ret i64 %2
}
+
+define i64 @srai_slli(i16 signext %0) {
+; RV64I-LABEL: srai_slli:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 57
+; RV64I-NEXT: srai a0, a0, 63
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: srai_slli:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: slli a0, a0, 57
+; RV64ZBB-NEXT: srai a0, a0, 63
+; RV64ZBB-NEXT: ret
+ %2 = shl i16 %0, 9
+ %sext = ashr i16 %2, 15
+ %3 = sext i16 %sext to i64
+ ret i64 %3
+}
+
+define i64 @srai_slli2(i16 signext %0) {
+; RV64I-LABEL: srai_slli2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 57
+; RV64I-NEXT: srai a0, a0, 62
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: srai_slli2:
+; RV64ZBB: # %bb.0:
+; RV64ZBB-NEXT: slli a0, a0, 57
+; RV64ZBB-NEXT: srai a0, a0, 62
+; RV64ZBB-NEXT: ret
+ %2 = shl i16 %0, 9
+ %sext = ashr i16 %2, 14
+ %3 = sext i16 %sext to i64
+ ret i64 %3
+}
+
+define signext i32 @func0000000000000001(i32 signext %0, i8 signext %1) #0 {
+; RV64I-LABEL: func0000000000000001:
+; RV64I: # %bb.0: # %entry
+; RV64I-NEXT: slli a1, a1, 59
+; RV64I-NEXT: srai a1, a1, 63
+; RV64I-NEXT: addw a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBB-LABEL: func0000000000000001:
+; RV64ZBB: # %bb.0: # %entry
+; RV64ZBB-NEXT: slli a1, a1, 59
+; RV64ZBB-NEXT: srai a1, a1, 63
+; RV64ZBB-NEXT: addw a0, a1, a0
+; RV64ZBB-NEXT: ret
+entry:
+ %2 = shl i8 %1, 3
+ %3 = ashr i8 %2, 7
+ %4 = sext i8 %3 to i32
+ %5 = add nsw i32 %4, %0
+ ret i32 %5
+}