[llvm] 266959c - [AArch64][SVE] Add backend support for splats of immediates
Cameron McInally via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 21 11:22:00 PST 2020
Author: Cameron McInally
Date: 2020-02-21T13:21:47-06:00
New Revision: 266959c0f72ff359a60fe43da0cf336604611029
URL: https://github.com/llvm/llvm-project/commit/266959c0f72ff359a60fe43da0cf336604611029
DIFF: https://github.com/llvm/llvm-project/commit/266959c0f72ff359a60fe43da0cf336604611029.diff
LOG: [AArch64][SVE] Add backend support for splats of immediates
This patch adds backend support for splats of both integer and floating-point immediates.
Differential Revision: https://reviews.llvm.org/D74856
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
llvm/test/CodeGen/AArch64/sve-vector-splat.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index acb1335b2487..d906cc6689d5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -273,6 +273,8 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
bool SelectCMP_SWAP(SDNode *N);
+ bool SelectSVE8BitLslImm(SDValue N, SDValue &Imm, SDValue &Shift);
+
bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm);
@@ -2918,6 +2920,32 @@ bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
return true;
}
+bool AArch64DAGToDAGISel::SelectSVE8BitLslImm(SDValue N, SDValue &Base,
+ SDValue &Offset) {
+ auto C = dyn_cast<ConstantSDNode>(N);
+ if (!C)
+ return false;
+
+ auto Ty = N->getValueType(0);
+
+ int64_t Imm = C->getSExtValue();
+ SDLoc DL(N);
+
+ if ((Imm >= -128) && (Imm <= 127)) {
+ Base = CurDAG->getTargetConstant(Imm, DL, Ty);
+ Offset = CurDAG->getTargetConstant(0, DL, Ty);
+ return true;
+ }
+
+ if (((Imm % 256) == 0) && (Imm >= -32768) && (Imm <= 32512)) {
+ Base = CurDAG->getTargetConstant(Imm/256, DL, Ty);
+ Offset = CurDAG->getTargetConstant(8, DL, Ty);
+ return true;
+ }
+
+ return false;
+}
+
bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift) {
if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
const int64_t ImmVal = CNode->getZExtValue();
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 59c478a3a386..f11234787905 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -10,6 +10,8 @@
//
//===----------------------------------------------------------------------===//
+def SVE8BitLslImm : ComplexPattern<i32, 2, "SelectSVE8BitLslImm", [imm]>;
+
// Non-faulting loads - node definitions
//
def SDT_AArch64_LDNF1 : SDTypeProfile<1, 3, [
@@ -329,6 +331,32 @@ let Predicates = [HasSVE] in {
def : Pat<(nxv2f32 (AArch64dup (f32 fpimm0))), (DUP_ZI_S 0, 0)>;
def : Pat<(nxv2f64 (AArch64dup (f64 fpimm0))), (DUP_ZI_D 0, 0)>;
+ // Duplicate Int immediate into all vector elements
+ def : Pat<(nxv16i8 (AArch64dup (i32 (SVE8BitLslImm i32:$a, i32:$b)))),
+ (DUP_ZI_B $a, $b)>;
+ def : Pat<(nxv8i16 (AArch64dup (i32 (SVE8BitLslImm i32:$a, i32:$b)))),
+ (DUP_ZI_H $a, $b)>;
+ def : Pat<(nxv4i32 (AArch64dup (i32 (SVE8BitLslImm i32:$a, i32:$b)))),
+ (DUP_ZI_S $a, $b)>;
+ def : Pat<(nxv2i64 (AArch64dup (i64 (SVE8BitLslImm i32:$a, i32:$b)))),
+ (DUP_ZI_D $a, $b)>;
+
+ // Duplicate FP immediate into all vector elements
+ let AddedComplexity = 2 in {
+ def : Pat<(nxv8f16 (AArch64dup fpimm16:$imm8)),
+ (FDUP_ZI_H fpimm16:$imm8)>;
+ def : Pat<(nxv4f16 (AArch64dup fpimm16:$imm8)),
+ (FDUP_ZI_H fpimm16:$imm8)>;
+ def : Pat<(nxv2f16 (AArch64dup fpimm16:$imm8)),
+ (FDUP_ZI_H fpimm16:$imm8)>;
+ def : Pat<(nxv4f32 (AArch64dup fpimm32:$imm8)),
+ (FDUP_ZI_S fpimm32:$imm8)>;
+ def : Pat<(nxv2f32 (AArch64dup fpimm32:$imm8)),
+ (FDUP_ZI_S fpimm32:$imm8)>;
+ def : Pat<(nxv2f64 (AArch64dup fpimm64:$imm8)),
+ (FDUP_ZI_D fpimm64:$imm8)>;
+ }
+
// Select elements from either vector (predicated)
defm SEL_ZPZZ : sve_int_sel_vvv<"sel", vselect>;
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
index dab5405f23c0..531b1b960147 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
@@ -38,6 +38,42 @@ define <vscale x 2 x i64> @sve_splat_2xi64(i64 %val) {
ret <vscale x 2 x i64> %splat
}
+define <vscale x 16 x i8> @sve_splat_16xi8_imm() {
+; CHECK-LABEL: @sve_splat_16xi8_imm
+; CHECK: mov z0.b, #1
+; CHECK-NEXT: ret
+ %ins = insertelement <vscale x 16 x i8> undef, i8 1, i32 0
+ %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ ret <vscale x 16 x i8> %splat
+}
+
+define <vscale x 8 x i16> @sve_splat_8xi16_imm() {
+; CHECK-LABEL: @sve_splat_8xi16_imm
+; CHECK: mov z0.h, #1
+; CHECK-NEXT: ret
+ %ins = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ ret <vscale x 8 x i16> %splat
+}
+
+define <vscale x 4 x i32> @sve_splat_4xi32_imm() {
+; CHECK-LABEL: @sve_splat_4xi32_imm
+; CHECK: mov z0.s, #1
+; CHECK-NEXT: ret
+ %ins = insertelement <vscale x 4 x i32> undef, i32 1, i32 0
+ %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ ret <vscale x 4 x i32> %splat
+}
+
+define <vscale x 2 x i64> @sve_splat_2xi64_imm() {
+; CHECK-LABEL: @sve_splat_2xi64_imm
+; CHECK: mov z0.d, #1
+; CHECK-NEXT: ret
+ %ins = insertelement <vscale x 2 x i64> undef, i64 1, i32 0
+ %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ ret <vscale x 2 x i64> %splat
+}
+
;; Promote splats of smaller illegal integer vector types
define <vscale x 2 x i8> @sve_splat_2xi8(i8 %val) {
@@ -231,3 +267,57 @@ define <vscale x 2 x double> @splat_nxv2f64_zero() {
; CHECK-NEXT: ret
ret <vscale x 2 x double> zeroinitializer
}
+
+define <vscale x 8 x half> @splat_nxv8f16_imm() {
+; CHECK-LABEL: splat_nxv8f16_imm:
+; CHECK: mov z0.h, #1.0
+; CHECK-NEXT: ret
+ %1 = insertelement <vscale x 8 x half> undef, half 1.0, i32 0
+ %2 = shufflevector <vscale x 8 x half> %1, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+ ret <vscale x 8 x half> %2
+}
+
+define <vscale x 4 x half> @splat_nxv4f16_imm() {
+; CHECK-LABEL: splat_nxv4f16_imm:
+; CHECK: mov z0.h, #1.0
+; CHECK-NEXT: ret
+ %1 = insertelement <vscale x 4 x half> undef, half 1.0, i32 0
+ %2 = shufflevector <vscale x 4 x half> %1, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+ ret <vscale x 4 x half> %2
+}
+
+define <vscale x 2 x half> @splat_nxv2f16_imm() {
+; CHECK-LABEL: splat_nxv2f16_imm:
+; CHECK: mov z0.h, #1.0
+; CHECK-NEXT: ret
+ %1 = insertelement <vscale x 2 x half> undef, half 1.0, i32 0
+ %2 = shufflevector <vscale x 2 x half> %1, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+ ret <vscale x 2 x half> %2
+}
+
+define <vscale x 4 x float> @splat_nxv4f32_imm() {
+; CHECK-LABEL: splat_nxv4f32_imm:
+; CHECK: mov z0.s, #1.0
+; CHECK-NEXT: ret
+ %1 = insertelement <vscale x 4 x float> undef, float 1.0, i32 0
+ %2 = shufflevector <vscale x 4 x float> %1, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ ret <vscale x 4 x float> %2
+}
+
+define <vscale x 2 x float> @splat_nxv2f32_imm() {
+; CHECK-LABEL: splat_nxv2f32_imm:
+; CHECK: mov z0.s, #1.0
+; CHECK-NEXT: ret
+ %1 = insertelement <vscale x 2 x float> undef, float 1.0, i32 0
+ %2 = shufflevector <vscale x 2 x float> %1, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ ret <vscale x 2 x float> %2
+}
+
+define <vscale x 2 x double> @splat_nxv2f64_imm() {
+; CHECK-LABEL: splat_nxv2f64_imm:
+; CHECK: mov z0.d, #1.0
+; CHECK-NEXT: ret
+ %1 = insertelement <vscale x 2 x double> undef, double 1.0, i32 0
+ %2 = shufflevector <vscale x 2 x double> %1, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+ ret <vscale x 2 x double> %2
+}
More information about the llvm-commits
mailing list