[llvm] 645b8f5 - [AArch64][SVE] Add patterns to generate ADR instruction
Usman Nadeem via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 21 15:51:25 PDT 2021
Author: Usman Nadeem
Date: 2021-09-21T15:50:49-07:00
New Revision: 645b8f5365de49cbced4286f86e4a149c56600d3
URL: https://github.com/llvm/llvm-project/commit/645b8f5365de49cbced4286f86e4a149c56600d3
DIFF: https://github.com/llvm/llvm-project/commit/645b8f5365de49cbced4286f86e4a149c56600d3.diff
LOG: [AArch64][SVE] Add patterns to generate ADR instruction
Differential Revision: https://reviews.llvm.org/D109665
Change-Id: I9d2928688b80b804a16f52928e2057749ec2c0b2
Added: 
    llvm/test/CodeGen/AArch64/sve-adr.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/test/CodeGen/AArch64/sve-gep.ll

Removed: 

################################################################################
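[Editor's note] The TableGen patterns added below recognize DAG shapes of the form "base + (optionally sign/zero-extended, optionally shifted) index" and select SVE's vector ADR instruction in place of separate extend/shift/add instructions. As a rough illustration (hand-written sketch, not part of the patch; the function name adr_uxtw is made up), IR of the following shape should now select the plain uxtw form, mirroring the scalable_of_fixed_5 test updated in sve-gep.ll further down:

target triple = "aarch64-unknown-linux-gnu"

define <vscale x 2 x i64> @adr_uxtw(<vscale x 2 x i64> %base, <vscale x 2 x i32> %idx) #0 {
  ; Zero-extend the 32-bit indices, then add them to the 64-bit base;
  ; this is expected to fold into: adr z0.d, [z0.d, z1.d, uxtw]
  %ext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
  %address = add <vscale x 2 x i64> %base, %ext
  ret <vscale x 2 x i64> %address
}

attributes #0 = { "target-features"="+sve" }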
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index aaaf679900c1..5059185c7618 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1196,6 +1196,51 @@ let Predicates = [HasSVE] in {
             (ADR_LSL_ZZZ_D_2 $Op1, $Op2)>;
   def : Pat<(nxv2i64 (int_aarch64_sve_adrd nxv2i64:$Op1, nxv2i64:$Op2)),
             (ADR_LSL_ZZZ_D_3 $Op1, $Op2)>;
+
+  // Patterns to generate adr instruction.
+  // adr z0.d, [z0.d, z0.d, uxtw]
+  def : Pat<(add nxv2i64:$Op1,
+                 (nxv2i64 (and nxv2i64:$Op2, (nxv2i64 (AArch64dup (i64 0xFFFFFFFF)))))),
+            (ADR_UXTW_ZZZ_D_0 $Op1, $Op2)>;
+  // adr z0.d, [z0.d, z0.d, sxtw]
+  def : Pat<(add nxv2i64:$Op1,
+                 (nxv2i64 (sext_inreg nxv2i64:$Op2, nxv2i32))),
+            (ADR_SXTW_ZZZ_D_0 $Op1, $Op2)>;
+
+  // adr z0.s, [z0.s, z0.s, lsl #<shift>]
+  // adr z0.d, [z0.d, z0.d, lsl #<shift>]
+  multiclass adrShiftPat<ValueType Ty, ValueType PredTy, ValueType ShiftTy, Instruction DestAdrIns, int ShiftAmt> {
+    def : Pat<(add Ty:$Op1,
+                   (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+                                     Ty:$Op2,
+                                     (Ty (AArch64dup (ShiftTy ShiftAmt)))))),
+              (DestAdrIns $Op1, $Op2)>;
+  }
+  defm : adrShiftPat<nxv2i64, nxv2i1, i64, ADR_LSL_ZZZ_D_1, 1>;
+  defm : adrShiftPat<nxv2i64, nxv2i1, i64, ADR_LSL_ZZZ_D_2, 2>;
+  defm : adrShiftPat<nxv2i64, nxv2i1, i64, ADR_LSL_ZZZ_D_3, 3>;
+  defm : adrShiftPat<nxv4i32, nxv4i1, i32, ADR_LSL_ZZZ_S_1, 1>;
+  defm : adrShiftPat<nxv4i32, nxv4i1, i32, ADR_LSL_ZZZ_S_2, 2>;
+  defm : adrShiftPat<nxv4i32, nxv4i1, i32, ADR_LSL_ZZZ_S_3, 3>;
+
+  // adr z0.d, [z0.d, z0.d, uxtw #<shift>]
+  // adr z0.d, [z0.d, z0.d, sxtw #<shift>]
+  multiclass adrXtwShiftPat<ValueType Ty, ValueType PredTy, int ShiftAmt> {
+    def : Pat<(add Ty:$Op1,
+                   (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+                                     (Ty (and Ty:$Op2, (Ty (AArch64dup (i64 0xFFFFFFFF))))),
+                                     (Ty (AArch64dup (i64 ShiftAmt)))))),
+              (!cast<Instruction>("ADR_UXTW_ZZZ_D_"#ShiftAmt) $Op1, $Op2)>;
+
+    def : Pat<(add Ty:$Op1,
+                   (Ty (AArch64lsl_p (PredTy (SVEAllActive)),
+                                     (Ty (sext_inreg Ty:$Op2, nxv2i32)),
+                                     (Ty (AArch64dup (i64 ShiftAmt)))))),
+              (!cast<Instruction>("ADR_SXTW_ZZZ_D_"#ShiftAmt) $Op1, $Op2)>;
+  }
+  defm : adrXtwShiftPat<nxv2i64, nxv2i1, 1>;
+  defm : adrXtwShiftPat<nxv2i64, nxv2i1, 2>;
+  defm : adrXtwShiftPat<nxv2i64, nxv2i1, 3>;
 } // End HasSVE
 
 let Predicates = [HasSVEorStreamingSVE] in {
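[Editor's note] The adrXtwShiftPat patterns above handle the combination of a 32-bit extend with a left shift by 1, 2 or 3. A hand-written IR sketch of the sxtw #1 case follows (not part of the patch; the name adr_sxtw_lsl1 and the expected output are assumptions based on the scalable_of_fixed_4_i16 check in sve-gep.ll below):

target triple = "aarch64-unknown-linux-gnu"

define <vscale x 2 x i64> @adr_sxtw_lsl1(<vscale x 2 x i64> %base, <vscale x 2 x i32> %idx) #0 {
  ; Sign-extend the 32-bit indices, scale them by 2, then add to the base;
  ; this should now fold into: adr z0.d, [z0.d, z1.d, sxtw #1]
  %ext = sext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
  %splat_insert = insertelement <vscale x 2 x i64> poison, i64 1, i32 0
  %one = shufflevector <vscale x 2 x i64> %splat_insert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %shiftedOffset = shl <vscale x 2 x i64> %ext, %one
  %address = add <vscale x 2 x i64> %base, %shiftedOffset
  ret <vscale x 2 x i64> %address
}

attributes #0 = { "target-features"="+sve" }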
diff --git a/llvm/test/CodeGen/AArch64/sve-adr.ll b/llvm/test/CodeGen/AArch64/sve-adr.ll
new file mode 100644
index 000000000000..8f8477d8f3f1
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-adr.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; ADR
+; Tests adr z0.s, [z0.s, z0.s, lsl #<1,2,3>]
+; Other formats are tested in llvm/test/CodeGen/AArch64/sve-gep.ll
+;
+
+define <vscale x 4 x i32> @adr_32bit_lsl1(<vscale x 4 x i32> %base, <vscale x 4 x i32> %idx) #0 {
+; CHECK-LABEL: adr_32bit_lsl1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adr z0.s, [z0.s, z1.s, lsl #1]
+; CHECK-NEXT: ret
+  %splat_insert = insertelement <vscale x 4 x i32> poison, i32 1, i32 0
+  %one = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %shiftedOffset = shl <vscale x 4 x i32> %idx, %one
+  %address = add <vscale x 4 x i32> %base, %shiftedOffset
+  ret <vscale x 4 x i32> %address
+}
+
+define <vscale x 4 x i32> @adr_32bit_lsl2(<vscale x 4 x i32> %base, <vscale x 4 x i32> %idx) #0 {
+; CHECK-LABEL: adr_32bit_lsl2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adr z0.s, [z0.s, z1.s, lsl #2]
+; CHECK-NEXT: ret
+  %splat_insert = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
+  %two = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %shiftedOffset = shl <vscale x 4 x i32> %idx, %two
+  %address = add <vscale x 4 x i32> %base, %shiftedOffset
+  ret <vscale x 4 x i32> %address
+}
+
+define <vscale x 4 x i32> @adr_32bit_lsl3(<vscale x 4 x i32> %base, <vscale x 4 x i32> %idx) #0 {
+; CHECK-LABEL: adr_32bit_lsl3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adr z0.s, [z0.s, z1.s, lsl #3]
+; CHECK-NEXT: ret
+  %splat_insert = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
+  %three = shufflevector <vscale x 4 x i32> %splat_insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %shiftedOffset = shl <vscale x 4 x i32> %idx, %three
+  %address = add <vscale x 4 x i32> %base, %shiftedOffset
+  ret <vscale x 4 x i32> %address
+}
+
+attributes #0 = { "target-features"="+sve" }
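[Editor's note] The new test only covers the 32-bit lsl forms directly; as its comment says, the remaining forms are exercised through getelementptr in sve-gep.ll below. For reference, a hand-written 64-bit analogue (not part of the patch; the name adr_64bit_lsl1 and the expected output are assumptions based on the ADR_LSL_ZZZ_D_1 pattern and the scalable_of_fixed_3_i16 check below):

target triple = "aarch64-unknown-linux-gnu"

define <vscale x 2 x i64> @adr_64bit_lsl1(<vscale x 2 x i64> %base, <vscale x 2 x i64> %idx) #0 {
  ; Scale the 64-bit indices by 2 and add them to the base;
  ; expected to select: adr z0.d, [z0.d, z1.d, lsl #1]
  %splat_insert = insertelement <vscale x 2 x i64> poison, i64 1, i32 0
  %one = shufflevector <vscale x 2 x i64> %splat_insert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %shiftedOffset = shl <vscale x 2 x i64> %idx, %one
  %address = add <vscale x 2 x i64> %base, %shiftedOffset
  ret <vscale x 2 x i64> %address
}

attributes #0 = { "target-features"="+sve" }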
diff --git a/llvm/test/CodeGen/AArch64/sve-gep.ll b/llvm/test/CodeGen/AArch64/sve-gep.ll
index 0fee23bfac4c..0dd1152e3824 100644
--- a/llvm/test/CodeGen/AArch64/sve-gep.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gep.ll
@@ -87,9 +87,8 @@ define <vscale x 2 x i8*> @scalable_of_fixed_3_i8(i8* %base, <vscale x 2 x i64>
define <vscale x 2 x i16*> @scalable_of_fixed_3_i16(i16* %base, <vscale x 2 x i64> %idx) {
; CHECK-LABEL: scalable_of_fixed_3_i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: lsl z0.d, z0.d, #1
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, lsl #1]
; CHECK-NEXT: ret
%d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idx
ret <vscale x 2 x i16*> %d
@@ -98,9 +97,8 @@ define <vscale x 2 x i16*> @scalable_of_fixed_3_i16(i16* %base, <vscale x 2 x i6
define <vscale x 2 x i32*> @scalable_of_fixed_3_i32(i32* %base, <vscale x 2 x i64> %idx) {
; CHECK-LABEL: scalable_of_fixed_3_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: lsl z0.d, z0.d, #2
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, lsl #2]
; CHECK-NEXT: ret
%d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idx
ret <vscale x 2 x i32*> %d
@@ -109,9 +107,8 @@ define <vscale x 2 x i32*> @scalable_of_fixed_3_i32(i32* %base, <vscale x 2 x i6
define <vscale x 2 x i64*> @scalable_of_fixed_3_i64(i64* %base, <vscale x 2 x i64> %idx) {
; CHECK-LABEL: scalable_of_fixed_3_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: lsl z0.d, z0.d, #3
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, lsl #3]
; CHECK-NEXT: ret
%d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idx
ret <vscale x 2 x i64*> %d
@@ -120,10 +117,8 @@ define <vscale x 2 x i64*> @scalable_of_fixed_3_i64(i64* %base, <vscale x 2 x i6
define <vscale x 2 x i8*> @scalable_of_fixed_4_i8(i8* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_4_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, sxtw]
; CHECK-NEXT: ret
%d = getelementptr i8, i8* %base, <vscale x 2 x i32> %idx
ret <vscale x 2 x i8*> %d
@@ -132,11 +127,8 @@ define <vscale x 2 x i8*> @scalable_of_fixed_4_i8(i8* %base, <vscale x 2 x i32>
define <vscale x 2 x i16*> @scalable_of_fixed_4_i16(i16* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_4_i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT: lsl z0.d, z0.d, #1
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, sxtw #1]
; CHECK-NEXT: ret
%d = getelementptr i16, i16* %base, <vscale x 2 x i32> %idx
ret <vscale x 2 x i16*> %d
@@ -145,11 +137,8 @@ define <vscale x 2 x i16*> @scalable_of_fixed_4_i16(i16* %base, <vscale x 2 x i3
define <vscale x 2 x i32*> @scalable_of_fixed_4_i32(i32* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_4_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT: lsl z0.d, z0.d, #2
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, sxtw #2]
; CHECK-NEXT: ret
%d = getelementptr i32, i32* %base, <vscale x 2 x i32> %idx
ret <vscale x 2 x i32*> %d
@@ -158,11 +147,8 @@ define <vscale x 2 x i32*> @scalable_of_fixed_4_i32(i32* %base, <vscale x 2 x i3
define <vscale x 2 x i64*> @scalable_of_fixed_4_i64(i64* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_4_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sxtw z0.d, p0/m, z0.d
-; CHECK-NEXT: lsl z0.d, z0.d, #3
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, sxtw #3]
; CHECK-NEXT: ret
%d = getelementptr i64, i64* %base, <vscale x 2 x i32> %idx
ret <vscale x 2 x i64*> %d
@@ -172,8 +158,7 @@ define <vscale x 2 x i8*> @scalable_of_fixed_5(i8* %base, <vscale x 2 x i32> %id
; CHECK-LABEL: scalable_of_fixed_5:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, uxtw]
; CHECK-NEXT: ret
%idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
%d = getelementptr i8, i8* %base, <vscale x 2 x i64> %idxZext
@@ -183,10 +168,8 @@ define <vscale x 2 x i8*> @scalable_of_fixed_5(i8* %base, <vscale x 2 x i32> %id
define <vscale x 2 x i16*> @scalable_of_fixed_5_i16(i16* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_5_i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: lsl z0.d, z0.d, #1
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, uxtw #1]
; CHECK-NEXT: ret
%idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
%d = getelementptr i16, i16* %base, <vscale x 2 x i64> %idxZext
@@ -196,10 +179,8 @@ define <vscale x 2 x i16*> @scalable_of_fixed_5_i16(i16* %base, <vscale x 2 x i3
define <vscale x 2 x i32*> @scalable_of_fixed_5_i32(i32* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_5_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: lsl z0.d, z0.d, #2
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, uxtw #2]
; CHECK-NEXT: ret
%idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
%d = getelementptr i32, i32* %base, <vscale x 2 x i64> %idxZext
@@ -210,10 +191,8 @@ define <vscale x 2 x i32*> @scalable_of_fixed_5_i32(i32* %base, <vscale x 2 x i3
define <vscale x 2 x i64*> @scalable_of_fixed_5_i64(i64* %base, <vscale x 2 x i32> %idx) {
; CHECK-LABEL: scalable_of_fixed_5_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: mov z1.d, x0
-; CHECK-NEXT: lsl z0.d, z0.d, #3
-; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: adr z0.d, [z1.d, z0.d, uxtw #3]
; CHECK-NEXT: ret
%idxZext = zext <vscale x 2 x i32> %idx to <vscale x 2 x i64>
%d = getelementptr i64, i64* %base, <vscale x 2 x i64> %idxZext