[llvm] [RISCV] Extend pattern matches involving shNadd to support disjoint or (PR #87001)
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 28 13:13:09 PDT 2024
https://github.com/preames created https://github.com/llvm/llvm-project/pull/87001
I tried to add representative tests while not duplicating complete
coverage. If there are other tests you'd like to see, let me know.
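
For context, the disjoint flag on an or asserts that the two operands have
no set bits in common, which makes the or compute the same value as an add
of the same operands. A minimal IR sketch of the kind of input the extended
patterns now handle (hypothetical function name; this mirrors the
disjointormul6 test below):

  define i64 @example(i64 %a, i64 %b) {
    %c = mul i64 %a, 6            ; Zba can select this via sh1add
    %d = or disjoint i64 %c, %b   ; no common bits, so equivalent to add
    ret i64 %d
  }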
From a9e4e4af9d25a0983ddb83e16cb3b406c05d1a90 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 28 Mar 2024 12:12:08 -0700
Subject: [PATCH 1/2] [RISCV] Add tests for forming shNadd patterns involving
disjoint or
---
llvm/test/CodeGen/RISCV/rv64zba.ll | 87 ++++++++++++++++++++++++++++++
1 file changed, 87 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index d9d83633a8537f..03f1b59fcf23a8 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -209,6 +209,25 @@ define i64 @sh1adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh1adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh1adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 31
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh1adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli.uw a0, a0, 1
+; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 1
+ %4 = and i64 %3, 8589934590
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
define signext i32 @sh2adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh2adduw:
; RV64I: # %bb.0:
@@ -247,6 +266,25 @@ define i64 @sh2adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh2adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh2adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 30
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh2adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli.uw a0, a0, 2
+; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 2
+ %4 = and i64 %3, 17179869180
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
define i64 @sh3adduw(i32 signext %0, ptr %1) {
; RV64I-LABEL: sh3adduw:
; RV64I: # %bb.0:
@@ -285,6 +323,25 @@ define i64 @sh3adduw_2(i64 %0, i64 %1) {
ret i64 %5
}
+define i64 @sh3adduw_3(i64 %0, i64 %1) {
+; RV64I-LABEL: sh3adduw_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 29
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: sh3adduw_3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli.uw a0, a0, 3
+; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %3 = shl i64 %0, 3
+ %4 = and i64 %3, 34359738360
+ %5 = or disjoint i64 %4, %1
+ ret i64 %5
+}
+
; Type legalization inserts a sext_inreg after the first add. That add will be
; selected as sh2add which does not sign extend. SimplifyDemandedBits is unable
; to remove the sext_inreg because it has multiple uses. The ashr will use the
@@ -335,6 +392,25 @@ define i64 @addmul6(i64 %a, i64 %b) {
ret i64 %d
}
+define i64 @disjointormul6(i64 %a, i64 %b) {
+; RV64I-LABEL: disjointormul6:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 6
+; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: disjointormul6:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: sh1add a0, a0, a0
+; RV64ZBA-NEXT: slli a0, a0, 1
+; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: ret
+ %c = mul i64 %a, 6
+ %d = or disjoint i64 %c, %b
+ ret i64 %d
+}
+
define i64 @addmul10(i64 %a, i64 %b) {
; RV64I-LABEL: addmul10:
; RV64I: # %bb.0:
@@ -1099,6 +1175,17 @@ define i64 @add4104(i64 %a) {
ret i64 %c
}
+define i64 @add4104_2(i64 %a) {
+; CHECK-LABEL: add4104_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 1
+; CHECK-NEXT: addiw a1, a1, 8
+; CHECK-NEXT: or a0, a0, a1
+; CHECK-NEXT: ret
+ %c = or disjoint i64 %a, 4104
+ ret i64 %c
+}
+
define i64 @add8208(i64 %a) {
; RV64I-LABEL: add8208:
; RV64I: # %bb.0:
From b8b89e9719bfa8d477c43b6ac3f96af89477a5e0 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 28 Mar 2024 12:13:29 -0700
Subject: [PATCH 2/2] [RISCV] Extend pattern matches involving shNadd to
support disjoint or
I tried to add representative tests while not duplicating complete
coverage. If there are other tests you'd like to see, let me know.
---
llvm/lib/Target/RISCV/RISCVInstrInfoZb.td | 44 +++++++++++------------
llvm/test/CodeGen/RISCV/rv64zba.ll | 30 ++++++++--------
2 files changed, 38 insertions(+), 36 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 549bc039fabbc7..ba7157eee60c82 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -685,29 +685,29 @@ foreach i = {1,2,3} in {
(shxadd pat:$rs1, GPR:$rs2)>;
}
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 6)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 6)), GPR:$rs2),
(SH1ADD (XLenVT (SH1ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 10)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 10)), GPR:$rs2),
(SH1ADD (XLenVT (SH2ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 18)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 18)), GPR:$rs2),
(SH1ADD (XLenVT (SH3ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 12)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 12)), GPR:$rs2),
(SH2ADD (XLenVT (SH1ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 20)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 20)), GPR:$rs2),
(SH2ADD (XLenVT (SH2ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 36)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 36)), GPR:$rs2),
(SH2ADD (XLenVT (SH3ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 24)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 24)), GPR:$rs2),
(SH3ADD (XLenVT (SH1ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 40)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 40)), GPR:$rs2),
(SH3ADD (XLenVT (SH2ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
+def : Pat<(add_like (mul_oneuse GPR:$rs1, (XLenVT 72)), GPR:$rs2),
(SH3ADD (XLenVT (SH3ADD GPR:$rs1, GPR:$rs1)), GPR:$rs2)>;
-def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy4:$i),
+def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy4:$i),
(SH2ADD (XLenVT (ADDI (XLenVT X0), (SimmShiftRightBy2XForm CSImm12MulBy4:$i))),
GPR:$r)>;
-def : Pat<(add (XLenVT GPR:$r), CSImm12MulBy8:$i),
+def : Pat<(add_like (XLenVT GPR:$r), CSImm12MulBy8:$i),
(SH3ADD (XLenVT (ADDI (XLenVT X0), (SimmShiftRightBy3XForm CSImm12MulBy8:$i))),
GPR:$r)>;
@@ -762,37 +762,37 @@ def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (ADD_UW GPR:$rs, (XLenVT X0))>;
foreach i = {1,2,3} in {
defvar shxadd_uw = !cast<Instruction>("SH"#i#"ADD_UW");
- def : Pat<(i64 (add_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)), (XLenVT GPR:$rs2))),
+ def : Pat<(i64 (add_like_non_imm12 (shl (and GPR:$rs1, 0xFFFFFFFF), (i64 i)), (XLenVT GPR:$rs2))),
(shxadd_uw GPR:$rs1, GPR:$rs2)>;
}
-def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 1)), 0x1FFFFFFFF), (XLenVT GPR:$rs2))),
(SH1ADD_UW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 2)), 0x3FFFFFFFF), (XLenVT GPR:$rs2))),
(SH2ADD_UW GPR:$rs1, GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and (shl GPR:$rs1, (i64 3)), 0x7FFFFFFFF), (XLenVT GPR:$rs2))),
(SH3ADD_UW GPR:$rs1, GPR:$rs2)>;
// More complex cases use a ComplexPattern.
foreach i = {1,2,3} in {
defvar pat = !cast<ComplexPattern>("sh"#i#"add_uw_op");
- def : Pat<(i64 (add_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
+ def : Pat<(i64 (add_like_non_imm12 pat:$rs1, (XLenVT GPR:$rs2))),
(!cast<Instruction>("SH"#i#"ADD_UW") pat:$rs1, GPR:$rs2)>;
}
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFE), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFE), (XLenVT GPR:$rs2))),
(SH1ADD (XLenVT (SRLIW GPR:$rs1, 1)), GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFFC), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFC), (XLenVT GPR:$rs2))),
(SH2ADD (XLenVT (SRLIW GPR:$rs1, 2)), GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0xFFFFFFF8), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFF8), (XLenVT GPR:$rs2))),
(SH3ADD (XLenVT (SRLIW GPR:$rs1, 3)), GPR:$rs2)>;
// Use SRLI to clear the LSBs and SHXADD_UW to mask and shift.
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x1FFFFFFFE), (XLenVT GPR:$rs2))),
(SH1ADD_UW (XLenVT (SRLI GPR:$rs1, 1)), GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x3FFFFFFFC), (XLenVT GPR:$rs2))),
(SH2ADD_UW (XLenVT (SRLI GPR:$rs1, 2)), GPR:$rs2)>;
-def : Pat<(i64 (add_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8), (XLenVT GPR:$rs2))),
+def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0x7FFFFFFF8), (XLenVT GPR:$rs2))),
(SH3ADD_UW (XLenVT (SRLI GPR:$rs1, 3)), GPR:$rs2)>;
def : Pat<(i64 (mul (and_oneuse GPR:$r, 0xFFFFFFFF), C3LeftShiftUW:$i)),
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 03f1b59fcf23a8..c81c6aeaab890c 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -219,8 +219,7 @@ define i64 @sh1adduw_3(i64 %0, i64 %1) {
;
; RV64ZBA-LABEL: sh1adduw_3:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 1
-; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: sh1add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
%3 = shl i64 %0, 1
%4 = and i64 %3, 8589934590
@@ -276,8 +275,7 @@ define i64 @sh2adduw_3(i64 %0, i64 %1) {
;
; RV64ZBA-LABEL: sh2adduw_3:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 2
-; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: sh2add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
%3 = shl i64 %0, 2
%4 = and i64 %3, 17179869180
@@ -333,8 +331,7 @@ define i64 @sh3adduw_3(i64 %0, i64 %1) {
;
; RV64ZBA-LABEL: sh3adduw_3:
; RV64ZBA: # %bb.0:
-; RV64ZBA-NEXT: slli.uw a0, a0, 3
-; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: sh3add.uw a0, a0, a1
; RV64ZBA-NEXT: ret
%3 = shl i64 %0, 3
%4 = and i64 %3, 34359738360
@@ -403,8 +400,7 @@ define i64 @disjointormul6(i64 %a, i64 %b) {
; RV64ZBA-LABEL: disjointormul6:
; RV64ZBA: # %bb.0:
; RV64ZBA-NEXT: sh1add a0, a0, a0
-; RV64ZBA-NEXT: slli a0, a0, 1
-; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: sh1add a0, a0, a1
; RV64ZBA-NEXT: ret
%c = mul i64 %a, 6
%d = or disjoint i64 %c, %b
@@ -1176,12 +1172,18 @@ define i64 @add4104(i64 %a) {
}
define i64 @add4104_2(i64 %a) {
-; CHECK-LABEL: add4104_2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, 1
-; CHECK-NEXT: addiw a1, a1, 8
-; CHECK-NEXT: or a0, a0, a1
-; CHECK-NEXT: ret
+; RV64I-LABEL: add4104_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a1, 1
+; RV64I-NEXT: addiw a1, a1, 8
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: add4104_2:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: li a1, 1026
+; RV64ZBA-NEXT: sh2add a0, a1, a0
+; RV64ZBA-NEXT: ret
%c = or disjoint i64 %a, 4104
ret i64 %c
}
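
A note for reviewers reading only the hunks above: the definitions of
add_like and add_like_non_imm12 are not shown in this excerpt. As a rough
sketch of the intent, assuming the backend's existing or_is_add fragment
(an or whose operands are known to share no set bits), add_like could be a
PatFrags along these lines; the actual definition in the patch may differ:

  // Sketch only: accept either a plain add or an or that is provably
  // equivalent to an add because its operands have no common set bits.
  def add_like : PatFrags<(ops node:$lhs, node:$rhs),
                          [(add node:$lhs, node:$rhs),
                           (or_is_add node:$lhs, node:$rhs)]>;

With such a fragment, each rewritten pattern matches both the add form and
the disjoint-or form without duplicating every Pat.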