[llvm] [RISCV] Select (and (sra x, c2), c1) as (srli (srai x, c2-c3), c3). (PR #101868)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Aug 4 00:36:32 PDT 2024
https://github.com/topperc created https://github.com/llvm/llvm-project/pull/101868
If c1 is a mask with c3 leading zeros and c2 is larger than c3.
Fixes regression reported in #101751.
From 4907e1c1fe6a7b68da322c15dd1fcbba3d235527 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sun, 4 Aug 2024 00:33:47 -0700
Subject: [PATCH] [RISCV] Select (and (sra x, c2), c1) as (srli (srai x,
c2-c3), c3).
If c1 is a mask with c3 leading zeros and c2 is larger than c3.
Fixes regression reported in #101751.
---
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 25 +++++++++++++++++++
.../RISCV/lack-of-signed-truncation-check.ll | 22 ++++++++--------
.../CodeGen/RISCV/signed-truncation-check.ll | 22 ++++++++--------
3 files changed, 45 insertions(+), 24 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 31f48a6ac24d7..f1f345d65854e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1449,6 +1449,31 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
}
}
+ // Turn (and (sra x, c2), c1) -> (srli (srai x, c2-c3), c3) if c1 is a mask with
+ // c3 leading zeros and c2 is larger than c3.
+ if (N0.getOpcode() == ISD::SRA && isa<ConstantSDNode>(N0.getOperand(1)) &&
+ N0.hasOneUse()) {
+ unsigned C2 = N0.getConstantOperandVal(1);
+ unsigned XLen = Subtarget->getXLen();
+ assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
+
+ SDValue X = N0.getOperand(0);
+
+ if (isMask_64(C1)) {
+ unsigned Leading = XLen - llvm::bit_width(C1);
+ if (C2 > Leading) {
+ SDNode *SRAI = CurDAG->getMachineNode(
+ RISCV::SRAI, DL, VT, X,
+ CurDAG->getTargetConstant(C2 - Leading, DL, VT));
+ SDNode *SRLI = CurDAG->getMachineNode(
+ RISCV::SRLI, DL, VT, SDValue(SRAI, 0),
+ CurDAG->getTargetConstant(Leading, DL, VT));
+ ReplaceNode(Node, SRLI);
+ return;
+ }
+ }
+ }
+
// If C1 masks off the upper bits only (but can't be formed as an
// ANDI), use an unsigned bitfield extract (e.g., th.extu), if
// available.
diff --git a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
index 6e3a50542939f..c7ba0e501fa44 100644
--- a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
@@ -24,25 +24,23 @@
define i1 @shifts_necmp_i16_i8(i16 %x) nounwind {
; RV32I-LABEL: shifts_necmp_i16_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a1, 16
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: srli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 24
-; RV32I-NEXT: srai a0, a0, 24
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: srai a0, a0, 8
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: shifts_necmp_i16_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: slli a1, a0, 48
+; RV64I-NEXT: srli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a0, a2
+; RV64I-NEXT: srai a0, a0, 8
+; RV64I-NEXT: srli a0, a0, 48
+; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
index de36bcdb91060..54b85fab757ca 100644
--- a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
@@ -24,25 +24,23 @@
define i1 @shifts_eqcmp_i16_i8(i16 %x) nounwind {
; RV32I-LABEL: shifts_eqcmp_i16_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a1, 16
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: srli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 24
-; RV32I-NEXT: srai a0, a0, 24
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: srai a0, a0, 8
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: shifts_eqcmp_i16_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: slli a1, a0, 48
+; RV64I-NEXT: srli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a0, a2
+; RV64I-NEXT: srai a0, a0, 8
+; RV64I-NEXT: srli a0, a0, 48
+; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ret
;
More information about the llvm-commits
mailing list