[llvm] cfe5a08 - [RISCV] Enable Zbb ANDN/ORN/XNOR for more 64-bit constants (#122698)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 14 00:15:18 PST 2025
Author: Piotr Fusik
Date: 2025-01-14T09:15:14+01:00
New Revision: cfe5a0847a42d7e67942d70f938d2d664a95990c
URL: https://github.com/llvm/llvm-project/commit/cfe5a0847a42d7e67942d70f938d2d664a95990c
DIFF: https://github.com/llvm/llvm-project/commit/cfe5a0847a42d7e67942d70f938d2d664a95990c.diff
LOG: [RISCV] Enable Zbb ANDN/ORN/XNOR for more 64-bit constants (#122698)
This extends PR #120221 to 64-bit constants that don't match
the 12-low-bits-set pattern.
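For reference, a minimal standalone version of the andnofff case from the updated test below; the function name andn_example and the llc invocation are illustrative, not part of the commit:

; Assumed invocation: llc -mtriple=riscv64 -mattr=+zbb
; The mask 0xff000000000000ff needs LI+SLLI+ADDI to materialize directly,
; but its inverse 0x00ffffffffffff00 is just LUI+SRLI, so ANDN is cheaper.
define i64 @andn_example(i64 %x) {
  %and = and i64 %x, -72057594037927681 ; 0xff000000000000ff
  ret i64 %and
}
; Expected RV64 output per the updated test:
;   lui  a1, 1048560
;   srli a1, a1, 8
;   andn a0, a0, a1
;   ret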
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 0070fd4520429f..9ccf95970e5b53 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3216,17 +3216,18 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
bool RISCVDAGToDAGISel::selectInvLogicImm(SDValue N, SDValue &Val) {
if (!isa<ConstantSDNode>(N))
return false;
-
int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
- if ((Imm & 0xfff) != 0xfff || Imm == -1)
+
+ // For 32-bit signed constants, we can only substitute LUI+ADDI with LUI.
+ if (isInt<32>(Imm) && ((Imm & 0xfff) != 0xfff || Imm == -1))
return false;
+ // Abandon this transform if the constant is needed elsewhere.
for (const SDNode *U : N->users()) {
if (!ISD::isBitwiseLogicOp(U->getOpcode()))
return false;
}
- // For 32-bit signed constants we already know it's a win: LUI+ADDI vs LUI.
// For 64-bit constants, the instruction sequences get complex,
// so we select inverted only if it's cheaper.
if (!isInt<32>(Imm)) {
diff --git a/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll b/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
index 393302c7bb5ab9..d953d34e2d7b9f 100644
--- a/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
+++ b/llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
@@ -330,10 +330,9 @@ define i64 @andnofff(i64 %x) {
;
; RV64-LABEL: andnofff:
; RV64: # %bb.0:
-; RV64-NEXT: li a1, -1
-; RV64-NEXT: slli a1, a1, 56
-; RV64-NEXT: addi a1, a1, 255
-; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: lui a1, 1048560
+; RV64-NEXT: srli a1, a1, 8
+; RV64-NEXT: andn a0, a0, a1
; RV64-NEXT: ret
%and = and i64 %x, -72057594037927681
ret i64 %and
@@ -349,10 +348,9 @@ define i64 @ornofff(i64 %x) {
;
; NOZBS64-LABEL: ornofff:
; NOZBS64: # %bb.0:
-; NOZBS64-NEXT: li a1, -1
-; NOZBS64-NEXT: slli a1, a1, 63
-; NOZBS64-NEXT: addi a1, a1, 2047
-; NOZBS64-NEXT: or a0, a0, a1
+; NOZBS64-NEXT: lui a1, 1048575
+; NOZBS64-NEXT: srli a1, a1, 1
+; NOZBS64-NEXT: orn a0, a0, a1
; NOZBS64-NEXT: ret
;
; ZBS32-LABEL: ornofff:
@@ -380,10 +378,9 @@ define i64 @xornofff(i64 %x) {
;
; RV64-LABEL: xornofff:
; RV64: # %bb.0:
-; RV64-NEXT: li a1, -1
-; RV64-NEXT: slli a1, a1, 60
-; RV64-NEXT: addi a1, a1, 255
-; RV64-NEXT: xor a0, a0, a1
+; RV64-NEXT: lui a1, 1048575
+; RV64-NEXT: srli a1, a1, 4
+; RV64-NEXT: xnor a0, a0, a1
; RV64-NEXT: ret
%xor = xor i64 %x, -1152921504606846721
ret i64 %xor
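To make the "select inverted only if it's cheaper" comparison concrete, here is a sketch of the trade-off for the constant implied by the old NOZBS64 sequence in ornofff above; the value and instruction counts are derived from the test diff, and the function name orn_example is illustrative:

; 0x80000000000007ff: direct materialization is LI+SLLI+ADDI (3 insts) plus OR = 4 total,
; while the inverse 0x7ffffffffffff800 is LUI+SRLI (2 insts) plus ORN = 3 total,
; so the inverted form is chosen only because its sequence is shorter.
define i64 @orn_example(i64 %x) {
  %or = or i64 %x, -9223372036854773761 ; 0x80000000000007ff
  ret i64 %or
}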