[llvm-branch-commits] [llvm] ad85e39 - [SVE] Add ISel pattern for addvl

Cullen Rhodes via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Wed Jan 13 03:29:20 PST 2021


Author: Cullen Rhodes
Date: 2021-01-13T10:57:49Z
New Revision: ad85e3967067154a579f7989ce0e736f8cd56be9

URL: https://github.com/llvm/llvm-project/commit/ad85e3967067154a579f7989ce0e736f8cd56be9
DIFF: https://github.com/llvm/llvm-project/commit/ad85e3967067154a579f7989ce0e736f8cd56be9.diff

LOG: [SVE] Add ISel pattern for addvl

Reviewed By: cameron.mcinally

Differential Revision: https://reviews.llvm.org/D94504
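
For context, here is a minimal before/after sketch of what the new pattern
buys. The IR is the scalar_of_scalable_1 test from sve-gep.ll, updated below
in this patch; the codegen comments mirror the old and new CHECK lines:

    ; Advancing a pointer to a scalable type moves it by a multiple of the
    ; vector length: 4 * sizeof(<vscale x 2 x i64>) = 4 * VL bytes.
    define <vscale x 2 x i64>* @scalar_of_scalable_1(<vscale x 2 x i64>* %base) {
      %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 4
      ret <vscale x 2 x i64>* %d
    }

    ; Before this patch:          After this patch:
    ;   rdvl x8, #4                 addvl x0, x0, #4
    ;   add  x0, x0, x8             ret
    ;   ret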

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/test/CodeGen/AArch64/split-vector-insert.ll
    llvm/test/CodeGen/AArch64/sve-gep.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
    llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
    llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 50368199effb..cd80f3801fb2 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1648,6 +1648,9 @@ let Predicates = [HasSVE] in {
     def : Pat<(vscale (sve_cntd_imm_neg i32:$imm)), (SUBXrs XZR, (CNTD_XPiI 31, $imm), 0)>;
   }
 
+  def : Pat<(add GPR64:$op, (vscale (sve_rdvl_imm i32:$imm))),
+            (ADDVL_XXI GPR64:$op, $imm)>;
+
   // FIXME: BigEndian requires an additional REV instruction to satisfy the
   // constraint that none of the bits change when stored to memory as one
  // type, and reloaded as another type.

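A note on the operand, hedged since the definition of sve_rdvl_imm is not
part of this diff: it presumably matches a vscale multiple of the form
n * 16 with n in [-32, 31], i.e. exactly the byte offsets that RDVL and
ADDVL encode in their signed 6-bit VL multiplier (one VL is 16 * vscale
bytes). The test churn below is this fold firing for addresses whose offset
does not fit the load/store "#imm, mul vl" form; the out-of-bound cases all
reduce to IR of the following shape (a sketch matching the CHECK lines, not
copied verbatim from the tests):

    ; An offset of #8 is outside ld1b's [-8, 7] "#imm, mul vl" range, so the
    ; address is materialized up front; it now selects to: addvl x8, x0, #8
    define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i8>* %a) {
      %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
      %load = load <vscale x 16 x i8>, <vscale x 16 x i8>* %base
      ret <vscale x 16 x i8> %load
    }
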
diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
index 3fb86ae6b963..cc2ea86cad2e 100644
--- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll
+++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
@@ -29,27 +29,27 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
 ; CHECK-NEXT:    lsl x10, x10, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    str q1, [x9, x10]
-; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
 ; CHECK-NEXT:    mov w9, #2
 ; CHECK-NEXT:    cmp x8, #2 // =2
 ; CHECK-NEXT:    csel x9, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    lsl x9, x9, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    str q2, [x10, x9]
-; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    mov w9, #4
 ; CHECK-NEXT:    cmp x8, #4 // =4
 ; CHECK-NEXT:    csel x9, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    lsl x9, x9, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #2, mul vl]
 ; CHECK-NEXT:    str q3, [x10, x9]
-; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
 ; CHECK-NEXT:    mov w9, #6
 ; CHECK-NEXT:    cmp x8, #6 // =6
 ; CHECK-NEXT:    csel x8, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    lsl x8, x8, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #3, mul vl]
 ; CHECK-NEXT:    str q4, [x10, x8]
@@ -82,27 +82,27 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
 ; CHECK-NEXT:    lsl x10, x10, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    str q1, [x9, x10]
-; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
 ; CHECK-NEXT:    mov w9, #2
 ; CHECK-NEXT:    cmp x8, #2 // =2
 ; CHECK-NEXT:    csel x9, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    lsl x9, x9, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #1, mul vl]
 ; CHECK-NEXT:    str q2, [x10, x9]
-; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    mov w9, #4
 ; CHECK-NEXT:    cmp x8, #4 // =4
 ; CHECK-NEXT:    csel x9, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    lsl x9, x9, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #2, mul vl]
 ; CHECK-NEXT:    str q3, [x10, x9]
-; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
 ; CHECK-NEXT:    mov w9, #6
 ; CHECK-NEXT:    cmp x8, #6 // =6
 ; CHECK-NEXT:    csel x8, x8, x9, lo
+; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    lsl x8, x8, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp, #3, mul vl]
 ; CHECK-NEXT:    str q4, [x10, x8]

diff --git a/llvm/test/CodeGen/AArch64/sve-gep.ll b/llvm/test/CodeGen/AArch64/sve-gep.ll
index ffde9289a55d..7e6e8efbb790 100644
--- a/llvm/test/CodeGen/AArch64/sve-gep.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gep.ll
@@ -8,8 +8,7 @@
 define <vscale x 2 x i64>* @scalar_of_scalable_1(<vscale x 2 x i64>* %base) {
 ; CHECK-LABEL: scalar_of_scalable_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #4
-; CHECK-NEXT:    add x0, x0, x8
+; CHECK-NEXT:    addvl x0, x0, #4
 ; CHECK-NEXT:    ret
   %d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 4
   ret <vscale x 2 x i64>* %d

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
index 44d4b1d27560..d8fd1973e78e 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
@@ -21,8 +21,7 @@ define <vscale x 16 x i8> @ldnf1b(<vscale x 16 x i1> %pg, i8* %a) {
 define <vscale x 16 x i8> @ldnf1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
   %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
@@ -71,8 +70,7 @@ define <vscale x 16 x i8> @ldnf1b_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
 define <vscale x 16 x i8> @ldnf1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
 ; CHECK-LABEL: ldnf1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ldnf1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
   %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*

diff --git a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
index f83865ac0d07..5cbcee7b85be 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-imm.ll
@@ -43,8 +43,7 @@ define <vscale x 16 x i8> @ld1b_upper_bound(<vscale x 16 x i8>* %a) {
 define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i8>* %a) {
 ; CHECK-LABEL: ld1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -56,8 +55,7 @@ define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i8>* %a) {
 define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i8>* %a) {
 ; CHECK-LABEL: ld1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
index ed0c9f278f0a..4a4a2c8c16ab 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-contiguous-ldst-addressing-mode-reg-imm.ll
@@ -13,11 +13,9 @@
 define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: imm_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8

diff --git a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
index d4d0b965b235..eb5d5fab545b 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-non-temporal-ldst-addressing-mode-reg-imm.ll
@@ -13,11 +13,9 @@
 define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: imm_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ldnt1d { z0.d }, p0/z, [x8]
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    stnt1d { z0.d }, p0, [x8]
 ; CHECK-NEXT:    ret
   %base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8

diff --git a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
index cf1aef43c0ef..a477b0e20050 100644
--- a/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-st1-addressing-mode-reg-imm.ll
@@ -43,8 +43,7 @@ define void @st1b_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a)
 define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a) {
 ; CHECK-LABEL: st1b_out_of_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #8
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x8]
 ; CHECK-NEXT:    ret
@@ -56,8 +55,7 @@ define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8
 define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i8>* %a) {
 ; CHECK-LABEL: st1b_out_of_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-9
-; CHECK-NEXT:    add x8, x0, x8
+; CHECK-NEXT:    addvl x8, x0, #-9
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    st1b { z0.b }, p0, [x8]
 ; CHECK-NEXT:    ret

