[llvm] 5c6ac3b - [AArch64][SVE] Combine add and index_vector

Jun Ma via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 19 20:39:35 PDT 2021


Author: Jun Ma
Date: 2021-04-20T11:38:37+08:00
New Revision: 5c6ac3b4a25e6514e0480cfa14a8ecd6f3962576

URL: https://github.com/llvm/llvm-project/commit/5c6ac3b4a25e6514e0480cfa14a8ecd6f3962576
DIFF: https://github.com/llvm/llvm-project/commit/5c6ac3b4a25e6514e0480cfa14a8ecd6f3962576.diff

LOG: [AArch64][SVE] Combine add and index_vector

This patch combines the pattern add(index_vector(zero, step), dup(X)) into index_vector(X, step), provided the index_vector result has no other uses.
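
A minimal sketch of the effect (illustrative IR; the function name and values are placeholders, and the added tests below exercise the same shape):

    declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()

    define <vscale x 4 x i32> @example(i32 %base) {
      ; dup(X): splat the scalar %base across all lanes
      %ins   = insertelement <vscale x 4 x i32> poison, i32 %base, i32 0
      %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
      ; index_vector(zero, step): the step vector <0, 1, 2, ...>
      %step  = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
      ; add(index_vector(0, 1), dup(%base)) -> index_vector(%base, 1)
      %sum   = add <vscale x 4 x i32> %step, %splat
      ret <vscale x 4 x i32> %sum
    }

Previously this selected roughly "index z0.s, #0, #1; mov z1.s, w0; add z0.s, z0.s, z1.s"; with the new patterns it selects the single instruction "index z0.s, w0, #1".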

Test Plan: check-llvm

Differential Revision: https://reviews.llvm.org/D100107

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/sve-stepvector.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index df4e2cd44623..1bd9b1fff207 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -259,6 +259,10 @@ def AArch64dup_mt : SDNode<"AArch64ISD::DUP_MERGE_PASSTHRU", SDT_AArch64DUP_PRED
 
 def SDT_IndexVector : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<1, 2>, SDTCisInt<2>]>;
 def index_vector : SDNode<"AArch64ISD::INDEX_VECTOR", SDT_IndexVector, []>;
+def index_vector_oneuse : PatFrag<(ops node:$base, node:$idx),
+                                  (index_vector node:$base, node:$idx), [{
+  return N->hasOneUse();
+}]>;
 
 def reinterpret_cast : SDNode<"AArch64ISD::REINTERPRET_CAST", SDTUnaryOp>;
 
@@ -1362,10 +1366,10 @@ let Predicates = [HasSVE] in {
   defm INCP_ZP     : sve_int_count_v<0b10000, "incp">;
   defm DECP_ZP     : sve_int_count_v<0b10100, "decp">;
 
-  defm INDEX_RR : sve_int_index_rr<"index", index_vector>;
-  defm INDEX_IR : sve_int_index_ir<"index", index_vector>;
-  defm INDEX_RI : sve_int_index_ri<"index", index_vector>;
-  defm INDEX_II : sve_int_index_ii<"index", index_vector>;
+  defm INDEX_RR : sve_int_index_rr<"index", index_vector, index_vector_oneuse>;
+  defm INDEX_IR : sve_int_index_ir<"index", index_vector, index_vector_oneuse>;
+  defm INDEX_RI : sve_int_index_ri<"index", index_vector, index_vector_oneuse>;
+  defm INDEX_II : sve_int_index_ii<"index", index_vector, index_vector_oneuse>;
 
   // Unpredicated shifts
   defm ASR_ZZI : sve_int_bin_cons_shift_imm_right<0b00, "asr", AArch64asr_p>;

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index eba9379ac030..327c7e540086 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -4777,7 +4777,7 @@ class sve_int_index_ii<bits<2> sz8_64, string asm, ZPRRegOp zprty,
   let Inst{4-0}   = Zd;
 }
 
-multiclass sve_int_index_ii<string asm, SDPatternOperator op> {
+multiclass sve_int_index_ii<string asm, SDPatternOperator op, SDPatternOperator oneuseop> {
   def _B : sve_int_index_ii<0b00, asm, ZPR8, simm5_8b>;
   def _H : sve_int_index_ii<0b01, asm, ZPR16, simm5_16b>;
   def _S : sve_int_index_ii<0b10, asm, ZPR32, simm5_32b>;
@@ -4791,6 +4791,16 @@ multiclass sve_int_index_ii<string asm, SDPatternOperator op> {
             (!cast<Instruction>(NAME # "_S") simm5_32b:$imm5, simm5_32b:$imm5b)>;
   def : Pat<(nxv2i64 (op simm5_64b:$imm5, simm5_64b:$imm5b)),
             (!cast<Instruction>(NAME # "_D") simm5_64b:$imm5, simm5_64b:$imm5b)>;
+
+  // add(index_vector(zero, step), dup(X)) -> index_vector(X, step).
+  def : Pat<(add (nxv16i8 (oneuseop (i32 0), simm5_8b:$imm5b)), (nxv16i8 (AArch64dup(simm5_8b:$imm5)))),
+            (!cast<Instruction>(NAME # "_B") simm5_8b:$imm5, simm5_8b:$imm5b)>;
+  def : Pat<(add (nxv8i16 (oneuseop (i32 0), simm5_16b:$imm5b)), (nxv8i16 (AArch64dup(simm5_16b:$imm5)))),
+            (!cast<Instruction>(NAME # "_H") simm5_16b:$imm5, simm5_16b:$imm5b)>;
+  def : Pat<(add (nxv4i32 (oneuseop (i32 0), simm5_32b:$imm5b)), (nxv4i32 (AArch64dup(simm5_32b:$imm5)))),
+            (!cast<Instruction>(NAME # "_S") simm5_32b:$imm5, simm5_32b:$imm5b)>;
+  def : Pat<(add (nxv2i64 (oneuseop (i64 0), simm5_64b:$imm5b)), (nxv2i64 (AArch64dup(simm5_64b:$imm5)))),
+            (!cast<Instruction>(NAME # "_D") simm5_64b:$imm5, simm5_64b:$imm5b)>;
 }
 
 class sve_int_index_ir<bits<2> sz8_64, string asm, ZPRRegOp zprty,
@@ -4810,7 +4820,7 @@ class sve_int_index_ir<bits<2> sz8_64, string asm, ZPRRegOp zprty,
   let Inst{4-0}   = Zd;
 }
 
-multiclass sve_int_index_ir<string asm, SDPatternOperator op> {
+multiclass sve_int_index_ir<string asm, SDPatternOperator op, SDPatternOperator oneuseop> {
   def _B : sve_int_index_ir<0b00, asm, ZPR8, GPR32, simm5_8b>;
   def _H : sve_int_index_ir<0b01, asm, ZPR16, GPR32, simm5_16b>;
   def _S : sve_int_index_ir<0b10, asm, ZPR32, GPR32, simm5_32b>;
@@ -4824,6 +4834,17 @@ multiclass sve_int_index_ir<string asm, SDPatternOperator op> {
             (!cast<Instruction>(NAME # "_S") simm5_32b:$imm5, GPR32:$Rm)>;
   def : Pat<(nxv2i64 (op simm5_64b:$imm5, GPR64:$Rm)),
             (!cast<Instruction>(NAME # "_D") simm5_64b:$imm5, GPR64:$Rm)>;
+
+  // add(index_vector(zero, step), dup(X)) -> index_vector(X, step).
+  def : Pat<(add (nxv16i8 (oneuseop (i32 0), GPR32:$Rm)), (nxv16i8 (AArch64dup(simm5_8b:$imm5)))),
+            (!cast<Instruction>(NAME # "_B") simm5_8b:$imm5, GPR32:$Rm)>;
+  def : Pat<(add (nxv8i16 (oneuseop (i32 0), GPR32:$Rm)), (nxv8i16 (AArch64dup(simm5_16b:$imm5)))),
+            (!cast<Instruction>(NAME # "_H") simm5_16b:$imm5, GPR32:$Rm)>;
+  def : Pat<(add (nxv4i32 (oneuseop (i32 0), GPR32:$Rm)), (nxv4i32 (AArch64dup(simm5_32b:$imm5)))),
+            (!cast<Instruction>(NAME # "_S") simm5_32b:$imm5, GPR32:$Rm)>;
+  def : Pat<(add (nxv2i64 (oneuseop (i64 0), GPR64:$Rm)), (nxv2i64 (AArch64dup(simm5_64b:$imm5)))),
+            (!cast<Instruction>(NAME # "_D") simm5_64b:$imm5, GPR64:$Rm)>;
+
 }
 
 class sve_int_index_ri<bits<2> sz8_64, string asm, ZPRRegOp zprty,
@@ -4843,7 +4864,7 @@ class sve_int_index_ri<bits<2> sz8_64, string asm, ZPRRegOp zprty,
   let Inst{4-0}   = Zd;
 }
 
-multiclass sve_int_index_ri<string asm, SDPatternOperator op> {
+multiclass sve_int_index_ri<string asm, SDPatternOperator op, SDPatternOperator oneuseop> {
   def _B : sve_int_index_ri<0b00, asm, ZPR8, GPR32, simm5_8b>;
   def _H : sve_int_index_ri<0b01, asm, ZPR16, GPR32, simm5_16b>;
   def _S : sve_int_index_ri<0b10, asm, ZPR32, GPR32, simm5_32b>;
@@ -4857,6 +4878,16 @@ multiclass sve_int_index_ri<string asm, SDPatternOperator op> {
             (!cast<Instruction>(NAME # "_S") GPR32:$Rm, simm5_32b:$imm5)>;
   def : Pat<(nxv2i64 (op GPR64:$Rm, simm5_64b:$imm5)),
             (!cast<Instruction>(NAME # "_D") GPR64:$Rm, simm5_64b:$imm5)>;
+
+  // add(index_vector(zero, step), dup(X)) -> index_vector(X, step).
+  def : Pat<(add (nxv16i8 (oneuseop (i32 0), simm5_8b:$imm5)), (nxv16i8 (AArch64dup(i32 GPR32:$Rm)))),
+            (!cast<Instruction>(NAME # "_B") GPR32:$Rm, simm5_8b:$imm5)>;
+  def : Pat<(add (nxv8i16 (oneuseop (i32 0), simm5_16b:$imm5)), (nxv8i16 (AArch64dup(i32 GPR32:$Rm)))),
+            (!cast<Instruction>(NAME # "_H") GPR32:$Rm, simm5_16b:$imm5)>;
+  def : Pat<(add (nxv4i32 (oneuseop (i32 0), simm5_32b:$imm5)), (nxv4i32 (AArch64dup(i32 GPR32:$Rm)))),
+            (!cast<Instruction>(NAME # "_S") GPR32:$Rm, simm5_32b:$imm5)>;
+  def : Pat<(add (nxv2i64 (oneuseop (i64 0), simm5_64b:$imm5)), (nxv2i64 (AArch64dup(i64 GPR64:$Rm)))),
+            (!cast<Instruction>(NAME # "_D") GPR64:$Rm, simm5_64b:$imm5)>;
 }
 
 class sve_int_index_rr<bits<2> sz8_64, string asm, ZPRRegOp zprty,
@@ -4876,7 +4907,7 @@ class sve_int_index_rr<bits<2> sz8_64, string asm, ZPRRegOp zprty,
   let Inst{4-0}   = Zd;
 }
 
-multiclass sve_int_index_rr<string asm, SDPatternOperator op> {
+multiclass sve_int_index_rr<string asm, SDPatternOperator op, SDPatternOperator oneuseop> {
   def _B : sve_int_index_rr<0b00, asm, ZPR8, GPR32>;
   def _H : sve_int_index_rr<0b01, asm, ZPR16, GPR32>;
   def _S : sve_int_index_rr<0b10, asm, ZPR32, GPR32>;
@@ -4886,6 +4917,16 @@ multiclass sve_int_index_rr<string asm, SDPatternOperator op> {
   def : SVE_2_Op_Pat<nxv8i16, op, i32, i32, !cast<Instruction>(NAME # _H)>;
   def : SVE_2_Op_Pat<nxv4i32, op, i32, i32, !cast<Instruction>(NAME # _S)>;
   def : SVE_2_Op_Pat<nxv2i64, op, i64, i64, !cast<Instruction>(NAME # _D)>;
+
+  // add(index_vector(zero, step), dup(X)) -> index_vector(X, step).
+  def : Pat<(add (nxv16i8 (oneuseop (i32 0), GPR32:$Rm)), (nxv16i8 (AArch64dup(i32 GPR32:$Rn)))),
+            (!cast<Instruction>(NAME # "_B") GPR32:$Rn, GPR32:$Rm)>;
+  def : Pat<(add (nxv8i16 (oneuseop (i32 0), GPR32:$Rm)), (nxv8i16 (AArch64dup(i32 GPR32:$Rn)))),
+            (!cast<Instruction>(NAME # "_H") GPR32:$Rn, GPR32:$Rm)>;
+  def : Pat<(add (nxv4i32 (oneuseop (i32 0), GPR32:$Rm)), (nxv4i32 (AArch64dup(i32 GPR32:$Rn)))),
+            (!cast<Instruction>(NAME # "_S") GPR32:$Rn, GPR32:$Rm)>;
+  def : Pat<(add (nxv2i64 (oneuseop (i64 0), GPR64:$Rm)), (nxv2i64 (AArch64dup(i64 GPR64:$Rn)))),
+            (!cast<Instruction>(NAME # "_D") GPR64:$Rn, GPR64:$Rm)>;
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/AArch64/sve-stepvector.ll b/llvm/test/CodeGen/AArch64/sve-stepvector.ll
index d121fb46be44..524629ecfd84 100644
--- a/llvm/test/CodeGen/AArch64/sve-stepvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-stepvector.ll
@@ -131,6 +131,108 @@ entry:
   ret <vscale x 8 x i8> %3
 }
 
+define <vscale x 8 x i8> @add_stepvector_nxv8i8_2() {
+; CHECK-LABEL: add_stepvector_nxv8i8_2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    index z0.h, #2, #1
+; CHECK-NEXT:    ret
+entry:
+  %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
+  %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %3 = add <vscale x 8 x i8> %2, %1
+  ret <vscale x 8 x i8> %3
+}
+
+define <vscale x 8 x i8> @add_stepvector_nxv8i8_2_commutative() {
+; CHECK-LABEL: add_stepvector_nxv8i8_2_commutative:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    index z0.h, #2, #1
+; CHECK-NEXT:    ret
+entry:
+  %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
+  %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %3 = add <vscale x 8 x i8> %1, %2
+  ret <vscale x 8 x i8> %3
+}
+
+define <vscale x 8 x i16> @add_stepvector_nxv8i16_1(i16 %data) {
+; CHECK-LABEL: add_stepvector_nxv8i16_1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    index z0.h, w0, #1
+; CHECK-NEXT:    ret
+entry:
+  %0 = insertelement <vscale x 8 x i16> poison, i16 %data, i32 0
+  %1 = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %2 = call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
+  %3 = add <vscale x 8 x i16> %2, %1
+  ret <vscale x 8 x i16> %3
+}
+
+define <vscale x 4 x i32> @add_stepvector_nxv4i32_1(i32 %data) {
+; CHECK-LABEL: add_stepvector_nxv4i32_1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    index z0.s, w0, #1
+; CHECK-NEXT:    ret
+entry:
+  %0 = insertelement <vscale x 4 x i32> poison, i32 %data, i32 0
+  %1 = shufflevector <vscale x 4 x i32> %0, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %2 = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+  %3 = add <vscale x 4 x i32> %2, %1
+  ret <vscale x 4 x i32> %3
+}
+
+define <vscale x 4 x i32> @multiple_use_stepvector_nxv4i32_1(i32 %data) {
+; CHECK-LABEL: multiple_use_stepvector_nxv4i32_1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov z0.s, w0
+; CHECK-NEXT:    index z1.s, w0, #1
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    sub z0.s, z0.s, z1.s
+; CHECK-NEXT:    ret
+entry:
+  %0 = insertelement <vscale x 4 x i32> poison, i32 %data, i32 0
+  %1 = shufflevector <vscale x 4 x i32> %0, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %2 = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+  %3 = add <vscale x 4 x i32> %2, %1
+  %4 = mul <vscale x 4 x i32> %1, %3
+  %5 = sub <vscale x 4 x i32> %4, %3
+  ret <vscale x 4 x i32> %5
+}
+
+define <vscale x 2 x i64> @add_stepvector_nxv2i64_1(i64 %data) {
+; CHECK-LABEL: add_stepvector_nxv2i64_1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    index z0.d, x0, #1
+; CHECK-NEXT:    ret
+entry:
+  %0 = insertelement <vscale x 2 x i64> poison, i64 %data, i32 0
+  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %3 = add <vscale x 2 x i64> %1, %2
+  ret <vscale x 2 x i64> %3
+}
+
+define <vscale x 2 x i64> @multiple_use_stepvector_nxv2i64_1(i64 %data) {
+; CHECK-LABEL: multiple_use_stepvector_nxv2i64_1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov z0.d, x0
+; CHECK-NEXT:    index z1.d, #0, #1
+; CHECK-NEXT:    add z0.d, z0.d, z1.d
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+entry:
+  %0 = insertelement <vscale x 2 x i64> poison, i64 %data, i32 0
+  %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %3 = add <vscale x 2 x i64> %1, %2
+  %4 = mul <vscale x 2 x i64> %3, %2
+  ret <vscale x 2 x i64> %4
+}
+
 define <vscale x 8 x i8> @mul_stepvector_nxv8i8() {
 ; CHECK-LABEL: mul_stepvector_nxv8i8:
 ; CHECK:       // %bb.0: // %entry



