[llvm] 5bc99fb - [RISCV] Select (and (sra x, c2), c1) as (srli (srai x, c2-c3), c3). (#101868)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Aug 4 22:35:41 PDT 2024
Author: Craig Topper
Date: 2024-08-04T22:35:38-07:00
New Revision: 5bc99fb515f2411e458517f902435c28f2de94c3
URL: https://github.com/llvm/llvm-project/commit/5bc99fb515f2411e458517f902435c28f2de94c3
DIFF: https://github.com/llvm/llvm-project/commit/5bc99fb515f2411e458517f902435c28f2de94c3.diff
LOG: [RISCV] Select (and (sra x, c2), c1) as (srli (srai x, c2-c3), c3). (#101868)
If c1 is a mask with c3 leading zeros and c2 is larger than c3.
Fixes regression reported in #101751.
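The identity behind the pattern: shifting right arithmetically by c2-c3 and then
logically by c3 gives a total right shift of c2, and the final logical shift also
clears the top c3 bits, which is exactly what ANDing with a mask that has c3
leading zeros does. For example, in the new sraiw_andi test below,
(and (sra x, 31), 7) on RV32 becomes srai a0, a0, 2 followed by srli a0, a0, 29,
since c1 = 7 has c3 = 29 leading zeros and c2 = 31 > 29.

As a sanity check, a minimal standalone C++ sketch (not part of the patch; the
helper name sra64 and the 64-bit XLEN are just illustrative assumptions) that
exercises the identity over a few sample values:

#include <cassert>
#include <cstdint>

// Arithmetic shift right on a 64-bit value (sketch assumes XLEN == 64).
static uint64_t sra64(uint64_t X, unsigned Amt) {
  return static_cast<uint64_t>(static_cast<int64_t>(X) >> Amt);
}

int main() {
  const unsigned XLen = 64;
  const uint64_t Samples[] = {0, 1, 0x7fffffffffffffffULL,
                              0x8000000000000000ULL,
                              0xdeadbeefcafef00dULL, ~0ULL};
  for (uint64_t X : Samples) {
    for (unsigned C3 = 1; C3 < XLen; ++C3) {
      uint64_t C1 = ~0ULL >> C3; // mask with C3 leading zeros
      for (unsigned C2 = C3 + 1; C2 < XLen; ++C2) {
        uint64_t AndSra = sra64(X, C2) & C1;         // (and (sra x, c2), c1)
        uint64_t SrliSrai = sra64(X, C2 - C3) >> C3; // (srli (srai x, c2-c3), c3)
        assert(AndSra == SrliSrai && "patterns should be equivalent");
      }
    }
  }
  return 0;
}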
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
llvm/test/CodeGen/RISCV/signed-truncation-check.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index aed10c2de4372..3dcfeecec1e75 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1450,6 +1450,31 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
}
}
+ // Turn (and (sra x, c2), c1) -> (srli (srai x, c2-c3), c3) if c1 is a mask
+ // with c3 leading zeros and c2 is larger than c3.
+ if (N0.getOpcode() == ISD::SRA && isa<ConstantSDNode>(N0.getOperand(1)) &&
+ N0.hasOneUse()) {
+ unsigned C2 = N0.getConstantOperandVal(1);
+ unsigned XLen = Subtarget->getXLen();
+ assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
+
+ SDValue X = N0.getOperand(0);
+
+ if (isMask_64(C1)) {
+ unsigned Leading = XLen - llvm::bit_width(C1);
+ if (C2 > Leading) {
+ SDNode *SRAI = CurDAG->getMachineNode(
+ RISCV::SRAI, DL, VT, X,
+ CurDAG->getTargetConstant(C2 - Leading, DL, VT));
+ SDNode *SRLI = CurDAG->getMachineNode(
+ RISCV::SRLI, DL, VT, SDValue(SRAI, 0),
+ CurDAG->getTargetConstant(Leading, DL, VT));
+ ReplaceNode(Node, SRLI);
+ return;
+ }
+ }
+ }
+
// If C1 masks off the upper bits only (but can't be formed as an
// ANDI), use an unsigned bitfield extract (e.g., th.extu), if
// available.
diff --git a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
index 6e3a50542939f..c7ba0e501fa44 100644
--- a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
@@ -24,25 +24,23 @@
define i1 @shifts_necmp_i16_i8(i16 %x) nounwind {
; RV32I-LABEL: shifts_necmp_i16_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a1, 16
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: srli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 24
-; RV32I-NEXT: srai a0, a0, 24
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: srai a0, a0, 8
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: shifts_necmp_i16_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: slli a1, a0, 48
+; RV64I-NEXT: srli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a0, a2
+; RV64I-NEXT: srai a0, a0, 8
+; RV64I-NEXT: srli a0, a0, 48
+; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
index 3e6893731dd03..4749cc656693c 100644
--- a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
@@ -262,3 +262,27 @@ define i64 @sel_shift_bool_i64(i1 %t) {
%shl = select i1 %t, i64 65536, i64 0
ret i64 %shl
}
+
+; FIXME: This should use sraiw+and
+define i64 @sraiw_andi(i32 signext %0, i32 signext %1) nounwind {
+; RV32-LABEL: sraiw_andi:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: srai a0, a0, 2
+; RV32-NEXT: srli a0, a0, 29
+; RV32-NEXT: li a1, 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sraiw_andi:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: slli a0, a0, 32
+; RV64-NEXT: srai a0, a0, 2
+; RV64-NEXT: srli a0, a0, 61
+; RV64-NEXT: ret
+entry:
+ %3 = add i32 %0, %1
+ %4 = icmp sgt i32 %3, -1
+ %5 = select i1 %4, i64 0, i64 7
+ ret i64 %5
+}
diff --git a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
index de36bcdb91060..54b85fab757ca 100644
--- a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
@@ -24,25 +24,23 @@
define i1 @shifts_eqcmp_i16_i8(i16 %x) nounwind {
; RV32I-LABEL: shifts_eqcmp_i16_i8:
; RV32I: # %bb.0:
-; RV32I-NEXT: lui a1, 16
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: srli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 24
-; RV32I-NEXT: srai a0, a0, 24
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: xor a0, a0, a2
+; RV32I-NEXT: srai a0, a0, 8
+; RV32I-NEXT: srli a0, a0, 16
+; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: shifts_eqcmp_i16_i8:
; RV64I: # %bb.0:
-; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: slli a1, a0, 48
+; RV64I-NEXT: srli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 56
-; RV64I-NEXT: srai a0, a0, 56
-; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: xor a0, a0, a2
+; RV64I-NEXT: srai a0, a0, 8
+; RV64I-NEXT: srli a0, a0, 48
+; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ret
;