[llvm] Add RV64 constraint to SRLIW (PR #69416)
Shao-Ce SUN via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 17 21:43:05 PDT 2023
https://github.com/sunshaoce updated https://github.com/llvm/llvm-project/pull/69416
>From 5908e298ee628679bd2b642d64807740557963ca Mon Sep 17 00:00:00 2001
From: Shao-Ce SUN <sunshaoce at gmail.com>
Date: Wed, 18 Oct 2023 12:03:27 +0800
Subject: [PATCH 1/2] Add RV64 constraint to SRLIW
---
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9bf1e12584aee39..7d33cc42b049bed 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -955,7 +955,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
// 32 leading zeros and C3 trailing zeros.
- if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
+ if (ShAmt <= 32 && isShiftedMask_64(Mask) && Subtarget->is64Bit()) {
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
unsigned TrailingZeros = llvm::countr_zero(Mask);
@@ -984,7 +984,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
// 32 leading zeros and C3 trailing zeros.
- if (isShiftedMask_64(Mask) && N0.hasOneUse()) {
+ if (isShiftedMask_64(Mask) && N0.hasOneUse() && Subtarget->is64Bit()) {
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
unsigned TrailingZeros = llvm::countr_zero(Mask);
@@ -1014,7 +1014,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (ShAmt >= TrailingOnes)
break;
// If the mask has 32 trailing ones, use SRLIW.
- if (TrailingOnes == 32) {
+ if (TrailingOnes == 32 && Subtarget->is64Bit()) {
SDNode *SRLIW =
CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, N0->getOperand(0),
CurDAG->getTargetConstant(ShAmt, DL, VT));
@@ -1143,7 +1143,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
unsigned Leading = XLen - llvm::bit_width(C1);
if (C2 < Leading) {
// If the number of leading zeros is C2+32 this can be SRLIW.
- if (C2 + 32 == Leading) {
+ if (C2 + 32 == Leading && Subtarget->is64Bit()) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
ReplaceNode(Node, SRLIW);
@@ -1157,7 +1157,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// legalized and goes through DAG combine.
if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
- cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
+ cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32 &&
+ Subtarget->is64Bit()) {
SDNode *SRAIW =
CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
CurDAG->getTargetConstant(31, DL, VT));
@@ -1232,7 +1233,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
// shifted mask with c2 leading zeros and c3 trailing zeros.
- if (!LeftShift && isShiftedMask_64(C1)) {
+ if (!LeftShift && isShiftedMask_64(C1) && Subtarget->is64Bit()) {
unsigned Leading = XLen - llvm::bit_width(C1);
unsigned Trailing = llvm::countr_zero(C1);
if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
@@ -2680,7 +2681,7 @@ bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
isa<ConstantSDNode>(N0.getOperand(1))) {
uint64_t Mask = N0.getConstantOperandVal(1);
- if (isShiftedMask_64(Mask)) {
+ if (isShiftedMask_64(Mask) && Subtarget->is64Bit()) {
unsigned C1 = N.getConstantOperandVal(1);
unsigned XLen = Subtarget->getXLen();
unsigned Leading = XLen - llvm::bit_width(Mask);
>From e8c97bc9173c3383629ca12c5b212f25d9fbd0d8 Mon Sep 17 00:00:00 2001
From: Shao-Ce SUN <sunshaoce at gmail.com>
Date: Wed, 18 Oct 2023 12:42:40 +0800
Subject: [PATCH 2/2] fixup! Revert most of the erroneous changes
---
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 13 ++++++------
llvm/test/CodeGen/RISCV/aext.ll | 22 +++++++++++++++++++++
2 files changed, 28 insertions(+), 7 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/aext.ll
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 7d33cc42b049bed..d8f629906ce61c5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -955,7 +955,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
// 32 leading zeros and C3 trailing zeros.
- if (ShAmt <= 32 && isShiftedMask_64(Mask) && Subtarget->is64Bit()) {
+ if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
unsigned TrailingZeros = llvm::countr_zero(Mask);
@@ -984,7 +984,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
// 32 leading zeros and C3 trailing zeros.
- if (isShiftedMask_64(Mask) && N0.hasOneUse() && Subtarget->is64Bit()) {
+ if (isShiftedMask_64(Mask) && N0.hasOneUse()) {
unsigned XLen = Subtarget->getXLen();
unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
unsigned TrailingZeros = llvm::countr_zero(Mask);
@@ -1143,7 +1143,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
unsigned Leading = XLen - llvm::bit_width(C1);
if (C2 < Leading) {
// If the number of leading zeros is C2+32 this can be SRLIW.
- if (C2 + 32 == Leading && Subtarget->is64Bit()) {
+ if (C2 + 32 == Leading) {
SDNode *SRLIW = CurDAG->getMachineNode(
RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
ReplaceNode(Node, SRLIW);
@@ -1157,8 +1157,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// legalized and goes through DAG combine.
if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
- cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32 &&
- Subtarget->is64Bit()) {
+ cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
SDNode *SRAIW =
CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
CurDAG->getTargetConstant(31, DL, VT));
@@ -1233,7 +1232,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
// shifted mask with c2 leading zeros and c3 trailing zeros.
- if (!LeftShift && isShiftedMask_64(C1) && Subtarget->is64Bit()) {
+ if (!LeftShift && isShiftedMask_64(C1)) {
unsigned Leading = XLen - llvm::bit_width(C1);
unsigned Trailing = llvm::countr_zero(C1);
if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
@@ -2681,7 +2680,7 @@ bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
isa<ConstantSDNode>(N0.getOperand(1))) {
uint64_t Mask = N0.getConstantOperandVal(1);
- if (isShiftedMask_64(Mask) && Subtarget->is64Bit()) {
+ if (isShiftedMask_64(Mask)) {
unsigned C1 = N.getConstantOperandVal(1);
unsigned XLen = Subtarget->getXLen();
unsigned Leading = XLen - llvm::bit_width(Mask);
diff --git a/llvm/test/CodeGen/RISCV/aext.ll b/llvm/test/CodeGen/RISCV/aext.ll
new file mode 100644
index 000000000000000..e4c67fad57c3cfa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/aext.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+
+define i24 @aext(i32 %0) {
+; RV32I-LABEL: aext:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 0
+; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: aext:
+; RV64I: # %bb.0:
+; RV64I-NEXT: srliw a0, a0, 8
+; RV64I-NEXT: ret
+ %2 = and i32 %0, -256
+ %3 = lshr exact i32 %2, 8
+ %4 = trunc i32 %3 to i24
+ ret i24 %4
+}
More information about the llvm-commits
mailing list