[llvm] 91aa233 - [RISCV] Add isel pattern for (and X, 0xffffffff << C) with Zba.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 4 18:05:22 PDT 2022
Author: Craig Topper
Date: 2022-11-04T18:00:02-07:00
New Revision: 91aa233bdf9da29391e107e0575a733deb38c4c2
URL: https://github.com/llvm/llvm-project/commit/91aa233bdf9da29391e107e0575a733deb38c4c2
DIFF: https://github.com/llvm/llvm-project/commit/91aa233bdf9da29391e107e0575a733deb38c4c2.diff
LOG: [RISCV] Add isel pattern for (and X, 0xffffffff << C) with Zba.
We can use an SRLI by C to clear the LSBs, followed by an SLLI_UW by C to zero-extend the low 32 bits and shift them back into position.
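
As a quick sanity check on the identity the new pattern relies on (not part of the commit; a minimal standalone C sketch with made-up names), for 0 < C < 32 the value (X & (0xFFFFFFFF << C)) matches what an SRLI by C followed by an SLLI_UW by C produces, because SLLI_UW zero-extends the low 32 bits of its source before shifting:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Models the Zba SLLI_UW semantics: zero-extend the low 32 bits, then shift left. */
static uint64_t slli_uw(uint64_t rs1, unsigned shamt) {
  return (rs1 & 0xFFFFFFFFULL) << shamt;
}

int main(void) {
  uint64_t x = 0x123456789ABCDEF0ULL;      /* arbitrary test value */
  for (unsigned c = 1; c < 32; ++c) {
    uint64_t mask = 0xFFFFFFFFULL << c;    /* the Shifted32OnesMask immediate */
    uint64_t expected = x & mask;          /* original (and X, 0xffffffff << C) */
    uint64_t actual = slli_uw(x >> c, c);  /* SRLI by C, then SLLI_UW by C */
    assert(expected == actual);
  }
  printf("identity holds for 1 <= C <= 31\n");
  return 0;
}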
Added:
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
llvm/test/CodeGen/RISCV/rv64zba.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 90ae2ac398436..61476dd43d296 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -242,6 +242,16 @@ def non_imm12 : PatLeaf<(XLenVT GPR:$a), [{
return !C || !isInt<12>(C->getSExtValue());
}]>;
+def Shifted32OnesMask : PatLeaf<(imm), [{
+ uint64_t Imm = N->getZExtValue();
+ if (!isShiftedMask_64(Imm))
+ return false;
+
+ unsigned TrailingZeros = countTrailingZeros(Imm);
+ return TrailingZeros > 0 && TrailingZeros < 32 &&
+ Imm == UINT64_C(0xFFFFFFFF) << TrailingZeros;
+}], TrailingZeros>;
+
def sh1add_op : ComplexPattern<XLenVT, 1, "selectSH1ADDOp", [], [], 6>;
def sh2add_op : ComplexPattern<XLenVT, 1, "selectSH2ADDOp", [], [], 6>;
def sh3add_op : ComplexPattern<XLenVT, 1, "selectSH3ADDOp", [], [], 6>;
@@ -731,6 +741,12 @@ def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 81)),
let Predicates = [HasStdExtZba, IsRV64] in {
def : Pat<(i64 (shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt)),
(SLLI_UW GPR:$rs1, uimm5:$shamt)>;
+// Match a shifted 0xffffffff mask. Use SRLI to clear the LSBs and SLLI_UW to
+// mask and shift.
+def : Pat<(i64 (and GPR:$rs1, Shifted32OnesMask:$mask)),
+ (SLLI_UW (SRLI GPR:$rs1, Shifted32OnesMask:$mask),
+ Shifted32OnesMask:$mask)>;
+
def : Pat<(i64 (add (and GPR:$rs1, 0xFFFFFFFF), non_imm12:$rs2)),
(ADD_UW GPR:$rs1, GPR:$rs2)>;
def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, X0)>;
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index e9161ae8d69d0..6fa4e37782b37 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -1408,6 +1408,33 @@ define signext i16 @srliw_1_sh1add(i16* %0, i32 signext %1) {
ret i16 %6
}
+define i128 @slliuw_ptrdiff(i64 %diff, i128* %baseptr) {
+; RV64I-LABEL: slliuw_ptrdiff:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 1
+; RV64I-NEXT: slli a2, a2, 36
+; RV64I-NEXT: addi a2, a2, -16
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a1, a1, a0
+; RV64I-NEXT: ld a0, 0(a1)
+; RV64I-NEXT: ld a1, 8(a1)
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: slliuw_ptrdiff:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: srli a0, a0, 4
+; RV64ZBA-NEXT: slli.uw a0, a0, 4
+; RV64ZBA-NEXT: add a1, a1, a0
+; RV64ZBA-NEXT: ld a0, 0(a1)
+; RV64ZBA-NEXT: ld a1, 8(a1)
+; RV64ZBA-NEXT: ret
+ %ptrdiff = lshr exact i64 %diff, 4
+ %cast = and i64 %ptrdiff, 4294967295
+ %ptr = getelementptr inbounds i128, i128* %baseptr, i64 %cast
+ %res = load i128, i128* %ptr
+ ret i128 %res
+}
+
define signext i32 @srliw_2_sh2add(i32* %0, i32 signext %1) {
; RV64I-LABEL: srliw_2_sh2add:
; RV64I: # %bb.0: