[llvm] Add RV64 constraint to SRLIW (PR #69416)
Shao-Ce SUN via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 17 21:14:03 PDT 2023
https://github.com/sunshaoce created https://github.com/llvm/llvm-project/pull/69416
Related to issue #69408.
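For context: SRLIW (and SRAIW) are RV64-only W-form shifts that operate on the low 32 bits of a register and sign-extend the result, so none of the patterns touched below may fire when targeting RV32. The short sketch that follows is not part of the patch; it only illustrates the guard being threaded through the selectors, and the SubtargetInfo struct is a hypothetical stand-in for llvm::RISCVSubtarget.

#include <cstdint>

// Hypothetical stand-in for RISCVSubtarget; only the query the patch
// relies on is modeled here.
struct SubtargetInfo {
  unsigned XLen;                                // 32 for RV32, 64 for RV64
  bool is64Bit() const { return XLen == 64; }
};

// SRLIW logically right-shifts the low 32 bits and sign-extends the 32-bit
// result; the instruction does not exist on RV32, so a selector must check
// the subtarget before matching the rest of the pattern. This is the
// condition the patch adds in front of each SRLIW/SRAIW-producing combine.
bool canSelectSrliw(const SubtargetInfo &ST) { return ST.is64Bit(); }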
>From 5908e298ee628679bd2b642d64807740557963ca Mon Sep 17 00:00:00 2001
From: Shao-Ce SUN <sunshaoce at gmail.com>
Date: Wed, 18 Oct 2023 12:03:27 +0800
Subject: [PATCH] Add RV64 constraint to SRLIW
---
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9bf1e12584aee39..7d33cc42b049bed 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -955,7 +955,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
// 32 leading zeros and C3 trailing zeros.
- if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
+ if (ShAmt <= 32 && isShiftedMask_64(Mask) && Subtarget->is64Bit()) {
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
unsigned TrailingZeros = llvm::countr_zero(Mask);
@@ -984,7 +984,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
// 32 leading zeros and C3 trailing zeros.
- if (isShiftedMask_64(Mask) && N0.hasOneUse()) {
+ if (isShiftedMask_64(Mask) && N0.hasOneUse() && Subtarget->is64Bit()) {
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
unsigned TrailingZeros = llvm::countr_zero(Mask);
@@ -1014,7 +1014,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (ShAmt >= TrailingOnes)
break;
// If the mask has 32 trailing ones, use SRLIW.
- if (TrailingOnes == 32) {
+ if (TrailingOnes == 32 && Subtarget->is64Bit()) {
SDNode *SRLIW =
CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, N0->getOperand(0),
CurDAG->getTargetConstant(ShAmt, DL, VT));
@@ -1143,7 +1143,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
unsigned Leading = XLen - llvm::bit_width(C1);
if (C2 < Leading) {
// If the number of leading zeros is C2+32 this can be SRLIW.
- if (C2 + 32 == Leading) {
+ if (C2 + 32 == Leading && Subtarget->is64Bit()) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
ReplaceNode(Node, SRLIW);
@@ -1157,7 +1157,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// legalized and goes through DAG combine.
if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
- cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
+ cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32 &&
+ Subtarget->is64Bit()) {
SDNode *SRAIW =
CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
CurDAG->getTargetConstant(31, DL, VT));
@@ -1232,7 +1233,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
// shifted mask with c2 leading zeros and c3 trailing zeros.
- if (!LeftShift && isShiftedMask_64(C1)) {
+ if (!LeftShift && isShiftedMask_64(C1) && Subtarget->is64Bit()) {
unsigned Leading = XLen - llvm::bit_width(C1);
unsigned Trailing = llvm::countr_zero(C1);
if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
@@ -2680,7 +2681,7 @@ bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
isa<ConstantSDNode>(N0.getOperand(1))) {
uint64_t Mask = N0.getConstantOperandVal(1);
- if (isShiftedMask_64(Mask)) {
+ if (isShiftedMask_64(Mask) && Subtarget->is64Bit()) {
unsigned C1 = N.getConstantOperandVal(1);
unsigned XLen = Subtarget->getXLen();
unsigned Leading = XLen - llvm::bit_width(Mask);