[llvm] [RISCV] Inhibit DAG folding shl through zext.w pattern with zba (PR #91626)
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Thu May 9 11:02:55 PDT 2024
https://github.com/preames updated https://github.com/llvm/llvm-project/pull/91626
>From 2221121e138ef693fea3e8c4594fe2fc559820e1 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Wed, 8 May 2024 10:20:21 -0700
Subject: [PATCH 1/2] [RISCV] Inhibit DAG folding shl through zext.w pattern
with zba
If we allow the fold, the zext.w pattern becomes an and by shifted 32 bit
mask. In practice, we can't undo this during ISEL resulting in worse code
in some cases. There is a cost to inhibiting the generic transform -- we
lose out on the possibility of folds enabled by pushing the shift earlier.
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 7 +++++++
llvm/test/CodeGen/RISCV/rv64zba.ll | 6 ++----
2 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 846768f6d631e..60b21fb508990 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17141,6 +17141,13 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
return false;
}
}
+
+ // Don't break slli.uw patterns.
+ if (Subtarget.hasStdExtZba() && Ty.isScalarInteger() && N->getOpcode() == ISD::SHL &&
+ N0.getOpcode() == ISD::AND && isa<ConstantSDNode>(N0.getOperand(1)) &&
+ N0.getConstantOperandVal(1) == UINT64_C(0xffffffff))
+ return false;
+
return true;
}
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 8fe221f2a297a..a0a7db538e835 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -2866,8 +2866,7 @@ define ptr @gep_lshr_i32(ptr %0, i64 %1) {
;
; RV64ZBA-LABEL: gep_lshr_i32:
; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: slli a1, a1, 2
-; RV64ZBA-NEXT: srli a1, a1, 4
+; RV64ZBA-NEXT: srli a1, a1, 2
; RV64ZBA-NEXT: slli.uw a1, a1, 4
; RV64ZBA-NEXT: sh2add a1, a1, a1
; RV64ZBA-NEXT: add a0, a0, a1
@@ -2891,8 +2890,7 @@ define i64 @srli_slliw(i64 %1) {
;
; RV64ZBA-LABEL: srli_slliw:
; RV64ZBA: # %bb.0: # %entry
-; RV64ZBA-NEXT: slli a0, a0, 2
-; RV64ZBA-NEXT: srli a0, a0, 4
+; RV64ZBA-NEXT: srli a0, a0, 2
; RV64ZBA-NEXT: slli.uw a0, a0, 4
; RV64ZBA-NEXT: ret
entry:
>From 54fc5f6cb4d8439b4436073ef122794dab69efc1 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 9 May 2024 11:02:17 -0700
Subject: [PATCH 2/2] Clang format
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 60b21fb508990..cf682d9129e13 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17143,8 +17143,9 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
}
// Don't break slli.uw patterns.
- if (Subtarget.hasStdExtZba() && Ty.isScalarInteger() && N->getOpcode() == ISD::SHL &&
- N0.getOpcode() == ISD::AND && isa<ConstantSDNode>(N0.getOperand(1)) &&
+ if (Subtarget.hasStdExtZba() && Ty.isScalarInteger() &&
+ N->getOpcode() == ISD::SHL && N0.getOpcode() == ISD::AND &&
+ isa<ConstantSDNode>(N0.getOperand(1)) &&
N0.getConstantOperandVal(1) == UINT64_C(0xffffffff))
return false;
More information about the llvm-commits
mailing list