[llvm-branch-commits] [llvm] 095e245 - [RISCV] Add isel patterns for SH*ADD(.UW)
Craig Topper via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Jan 22 13:33:25 PST 2021
Author: Craig Topper
Date: 2021-01-22T13:28:41-08:00
New Revision: 095e245e164584b5de3c2938452b48d1f8ba4dda
URL: https://github.com/llvm/llvm-project/commit/095e245e164584b5de3c2938452b48d1f8ba4dda
DIFF: https://github.com/llvm/llvm-project/commit/095e245e164584b5de3c2938452b48d1f8ba4dda.diff
LOG: [RISCV] Add isel patterns for SH*ADD(.UW)
This adds an initial set of patterns for these instructions. It's
more complicated than I would like for the sh*add.uw instructions
because there is no guaranteed canonicalization for shl/and with
constants.
Reviewed By: asb
Differential Revision: https://reviews.llvm.org/D95106
Added:
llvm/test/CodeGen/RISCV/rv32Zba.ll
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfoB.td
llvm/test/CodeGen/RISCV/rv64Zba.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
index 0ba314dd6ecf..d4ba4f8e285c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -921,6 +921,15 @@ def : Pat<(or (or (and (shl GPR:$rs1, (i64 1)), (i64 0x4444444444444444)),
(SHFLI GPR:$rs1, (i64 1))>;
} // Predicates = [HasStdExtZbp, IsRV64]
+let Predicates = [HasStdExtZba] in {
+def : Pat<(add (shl GPR:$rs1, (XLenVT 1)), GPR:$rs2),
+ (SH1ADD GPR:$rs1, GPR:$rs2)>;
+def : Pat<(add (shl GPR:$rs1, (XLenVT 2)), GPR:$rs2),
+ (SH2ADD GPR:$rs1, GPR:$rs2)>;
+def : Pat<(add (shl GPR:$rs1, (XLenVT 3)), GPR:$rs2),
+ (SH3ADD GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZba]
+
let Predicates = [HasStdExtZba, IsRV64] in {
def : Pat<(SLLIUWPat GPR:$rs1, uimm5:$shamt),
(SLLIUW GPR:$rs1, uimm5:$shamt)>;
@@ -929,7 +938,21 @@ def : Pat<(shl (and GPR:$rs1, 0xFFFFFFFF), uimm5:$shamt),
def : Pat<(add (and GPR:$rs1, (i64 0xFFFFFFFF)), GPR:$rs2),
(ADDUW GPR:$rs1, GPR:$rs2)>;
def : Pat<(and GPR:$rs, 0x00000000FFFFFFFF), (ADDUW GPR:$rs, X0)>;
-}
+
+def : Pat<(add (shl (and GPR:$rs1, (i64 0xFFFFFFFF)), (XLenVT 1)), GPR:$rs2),
+ (SH1ADDUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(add (shl (and GPR:$rs1, (i64 0xFFFFFFFF)), (XLenVT 2)), GPR:$rs2),
+ (SH2ADDUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(add (shl (and GPR:$rs1, (i64 0xFFFFFFFF)), (XLenVT 3)), GPR:$rs2),
+ (SH3ADDUW GPR:$rs1, GPR:$rs2)>;
+
+def : Pat<(add (SLLIUWPat GPR:$rs1, (XLenVT 1)), GPR:$rs2),
+ (SH1ADDUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(add (SLLIUWPat GPR:$rs1, (XLenVT 2)), GPR:$rs2),
+ (SH2ADDUW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(add (SLLIUWPat GPR:$rs1, (XLenVT 3)), GPR:$rs2),
+ (SH3ADDUW GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZba, IsRV64]
let Predicates = [HasStdExtZbp, IsRV64] in {
def : Pat<(not (riscv_sllw (not GPR:$rs1), GPR:$rs2)),
diff --git a/llvm/test/CodeGen/RISCV/rv32Zba.ll b/llvm/test/CodeGen/RISCV/rv32Zba.ll
new file mode 100644
index 000000000000..b4e4ffc5ca72
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv32Zba.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32IB
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zba -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV32IBA
+
+define signext i16 @sh1add(i64 %0, i16* %1) {
+; RV32I-LABEL: sh1add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lh a0, 0(a0)
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: sh1add:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: sh1add a0, a0, a2
+; RV32IB-NEXT: lh a0, 0(a0)
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: sh1add:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: sh1add a0, a0, a2
+; RV32IBA-NEXT: lh a0, 0(a0)
+; RV32IBA-NEXT: ret
+ %3 = getelementptr inbounds i16, i16* %1, i64 %0
+ %4 = load i16, i16* %3
+ ret i16 %4
+}
+
+define i32 @sh2add(i64 %0, i32* %1) {
+; RV32I-LABEL: sh2add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 2
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: sh2add:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: sh2add a0, a0, a2
+; RV32IB-NEXT: lw a0, 0(a0)
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: sh2add:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: sh2add a0, a0, a2
+; RV32IBA-NEXT: lw a0, 0(a0)
+; RV32IBA-NEXT: ret
+ %3 = getelementptr inbounds i32, i32* %1, i64 %0
+ %4 = load i32, i32* %3
+ ret i32 %4
+}
+
+define i64 @sh3add(i64 %0, i64* %1) {
+; RV32I-LABEL: sh3add:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a0, a0, 3
+; RV32I-NEXT: add a1, a2, a0
+; RV32I-NEXT: lw a0, 0(a1)
+; RV32I-NEXT: lw a1, 4(a1)
+; RV32I-NEXT: ret
+;
+; RV32IB-LABEL: sh3add:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: sh3add a1, a0, a2
+; RV32IB-NEXT: lw a0, 0(a1)
+; RV32IB-NEXT: lw a1, 4(a1)
+; RV32IB-NEXT: ret
+;
+; RV32IBA-LABEL: sh3add:
+; RV32IBA: # %bb.0:
+; RV32IBA-NEXT: sh3add a1, a0, a2
+; RV32IBA-NEXT: lw a0, 0(a1)
+; RV32IBA-NEXT: lw a1, 4(a1)
+; RV32IBA-NEXT: ret
+ %3 = getelementptr inbounds i64, i64* %1, i64 %0
+ %4 = load i64, i64* %3
+ ret i64 %4
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64Zba.ll b/llvm/test/CodeGen/RISCV/rv64Zba.ll
index 95820d49e97a..245de0d3a83d 100644
--- a/llvm/test/CodeGen/RISCV/rv64Zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zba.ll
@@ -129,3 +129,234 @@ define i64 @zextw_i64(i64 %a) nounwind {
%and = and i64 %a, 4294967295
ret i64 %and
}
+
+define signext i16 @sh1add(i64 %0, i16* %1) {
+; RV64I-LABEL: sh1add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 1
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: sh1add:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh1add a0, a0, a1
+; RV64IB-NEXT: lh a0, 0(a0)
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: sh1add:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh1add a0, a0, a1
+; RV64IBA-NEXT: lh a0, 0(a0)
+; RV64IBA-NEXT: ret
+ %3 = getelementptr inbounds i16, i16* %1, i64 %0
+ %4 = load i16, i16* %3
+ ret i16 %4
+}
+
+define signext i32 @sh2add(i64 %0, i32* %1) {
+; RV64I-LABEL: sh2add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 2
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: sh2add:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh2add a0, a0, a1
+; RV64IB-NEXT: lw a0, 0(a0)
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: sh2add:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh2add a0, a0, a1
+; RV64IBA-NEXT: lw a0, 0(a0)
+; RV64IBA-NEXT: ret
+ %3 = getelementptr inbounds i32, i32* %1, i64 %0
+ %4 = load i32, i32* %3
+ ret i32 %4
+}
+
+define i64 @sh3add(i64 %0, i64* %1) {
+; RV64I-LABEL: sh3add:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: sh3add:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh3add a0, a0, a1
+; RV64IB-NEXT: ld a0, 0(a0)
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: sh3add:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh3add a0, a0, a1
+; RV64IBA-NEXT: ld a0, 0(a0)
+; RV64IBA-NEXT: ret
+ %3 = getelementptr inbounds i64, i64* %1, i64 %0
+ %4 = load i64, i64* %3
+ ret i64 %4
+}
+
+define signext i16 @sh1adduw(i32 signext %0, i16* %1) {
+; RV64I-LABEL: sh1adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: slli a0, a0, 1
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lh a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: sh1adduw:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh1add.uw a0, a0, a1
+; RV64IB-NEXT: lh a0, 0(a0)
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: sh1adduw:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh1add.uw a0, a0, a1
+; RV64IBA-NEXT: lh a0, 0(a0)
+; RV64IBA-NEXT: ret
+ %3 = zext i32 %0 to i64
+ %4 = getelementptr inbounds i16, i16* %1, i64 %3
+ %5 = load i16, i16* %4
+ ret i16 %5
+}
+
+define i64 @sh1adduw_2(i64 %0, i64 %1) {
+; RV64I-LABEL: sh1adduw_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 1
+; RV64I-NEXT: addi a2, zero, 1
+; RV64I-NEXT: slli a2, a2, 33
+; RV64I-NEXT: addi a2, a2, -2
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: sh1adduw_2:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh1add.uw a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: sh1adduw_2:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh1add.uw a0, a0, a1
+; RV64IBA-NEXT: ret
+ %3 = shl i64 %0, 1
+ %4 = and i64 %3, 8589934590
+ %5 = add i64 %4, %1
+ ret i64 %5
+}
+
+define signext i32 @sh2adduw(i32 signext %0, i32* %1) {
+; RV64I-LABEL: sh2adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: slli a0, a0, 2
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: lw a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: sh2adduw:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh2add.uw a0, a0, a1
+; RV64IB-NEXT: lw a0, 0(a0)
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: sh2adduw:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh2add.uw a0, a0, a1
+; RV64IBA-NEXT: lw a0, 0(a0)
+; RV64IBA-NEXT: ret
+ %3 = zext i32 %0 to i64
+ %4 = getelementptr inbounds i32, i32* %1, i64 %3
+ %5 = load i32, i32* %4
+ ret i32 %5
+}
+
+define i64 @sh2adduw_2(i64 %0, i64 %1) {
+; RV64I-LABEL: sh2adduw_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 2
+; RV64I-NEXT: addi a2, zero, 1
+; RV64I-NEXT: slli a2, a2, 34
+; RV64I-NEXT: addi a2, a2, -4
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: sh2adduw_2:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh2add.uw a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: sh2adduw_2:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh2add.uw a0, a0, a1
+; RV64IBA-NEXT: ret
+ %3 = shl i64 %0, 2
+ %4 = and i64 %3, 17179869180
+ %5 = add i64 %4, %1
+ ret i64 %5
+}
+
+define i64 @sh3adduw(i32 signext %0, i64* %1) {
+; RV64I-LABEL: sh3adduw:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: sh3adduw:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh3add.uw a0, a0, a1
+; RV64IB-NEXT: ld a0, 0(a0)
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: sh3adduw:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh3add.uw a0, a0, a1
+; RV64IBA-NEXT: ld a0, 0(a0)
+; RV64IBA-NEXT: ret
+ %3 = zext i32 %0 to i64
+ %4 = getelementptr inbounds i64, i64* %1, i64 %3
+ %5 = load i64, i64* %4
+ ret i64 %5
+}
+
+define i64 @sh3adduw_2(i64 %0, i64 %1) {
+; RV64I-LABEL: sh3adduw_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: addi a2, zero, 1
+; RV64I-NEXT: slli a2, a2, 35
+; RV64I-NEXT: addi a2, a2, -8
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: sh3adduw_2:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sh3add.uw a0, a0, a1
+; RV64IB-NEXT: ret
+;
+; RV64IBA-LABEL: sh3adduw_2:
+; RV64IBA: # %bb.0:
+; RV64IBA-NEXT: sh3add.uw a0, a0, a1
+; RV64IBA-NEXT: ret
+ %3 = shl i64 %0, 3
+ %4 = and i64 %3, 34359738360
+ %5 = add i64 %4, %1
+ ret i64 %5
+}
More information about the llvm-branch-commits
mailing list