[llvm] [AArch64] Use INDEX for constant Neon step vectors (PR #113424)

Ricardo Jesus via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 23 04:18:42 PDT 2024


https://github.com/rj-jesus updated https://github.com/llvm/llvm-project/pull/113424

>From a80656b592cf265b52d7e10040ac12e0368a3875 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj at nvidia.com>
Date: Wed, 23 Oct 2024 04:07:41 -0700
Subject: [PATCH 1/2] [AArch64] Add new tests for INDEX fold

---
 .../AArch64/sve-index-const-step-vector.ll    | 130 ++++++++++++++++++
 1 file changed, 130 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll

diff --git a/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll b/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll
new file mode 100644
index 00000000000000..b0128801d7ef46
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; 128-bit vectors
+
+define <16 x i8> @v16i8() #0 {
+; CHECK-LABEL: v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    ret
+  ret <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>
+}
+
+define <8 x i16> @v8i16() #0 {
+; CHECK-LABEL: v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI1_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT:    ret
+  ret <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
+}
+
+define <4 x i32> @v4i32() #0 {
+; CHECK-LABEL: v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI2_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI2_0]
+; CHECK-NEXT:    ret
+  ret <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+}
+
+define <2 x i64> @v2i64() #0 {
+; CHECK-LABEL: v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    ret
+  ret <2 x i64> <i64 0, i64 1>
+}
+
+; 64-bit vectors
+
+define <8 x i8> @v8i8() #0 {
+; CHECK-LABEL: v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    ret
+  ret <8 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
+}
+
+define <4 x i16> @v4i16() #0 {
+; CHECK-LABEL: v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI5_0
+; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI5_0]
+; CHECK-NEXT:    ret
+  ret <4 x i16> <i16 0, i16 1, i16 2, i16 3>
+}
+
+define <2 x i32> @v2i32() #0 {
+; CHECK-LABEL: v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    ret
+  ret <2 x i32> <i32 0, i32 1>
+}
+
+; Positive test, non-zero start and non-unit step.
+; Note: ideally this would be a single INDEX z0.s, #1, #2 (without the ORR).
+define <4 x i32> @v4i32_non_zero_non_one() #0 {
+; CHECK-LABEL: v4i32_non_zero_non_one:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    ret
+  ret <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+}
+
+; Positive test, same as above but negative immediates.
+define <4 x i32> @v4i32_neg_immediates() #0 {
+; CHECK-LABEL: v4i32_neg_immediates:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI8_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI8_0]
+; CHECK-NEXT:    ret
+  ret <4 x i32> <i32 -1, i32 -3, i32 -5, i32 -7>
+}
+
+; Positive test, out of imm range start.
+define <4 x i32> @v4i32_out_range_start() #0 {
+; CHECK-LABEL: v4i32_out_range_start:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI9_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI9_0]
+; CHECK-NEXT:    ret
+  ret <4 x i32> <i32 16, i32 17, i32 18, i32 19>
+}
+
+; Positive test, out of imm range step.
+define <4 x i32> @v4i32_out_range_step() #0 {
+; CHECK-LABEL: v4i32_out_range_step:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI10_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI10_0]
+; CHECK-NEXT:    ret
+  ret <4 x i32> <i32 0, i32 16, i32 32, i32 48>
+}
+
+; Positive test, out of imm range start and step.
+define <4 x i32> @v4i32_out_range_start_step() #0 {
+; CHECK-LABEL: v4i32_out_range_start_step:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI11_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI11_0]
+; CHECK-NEXT:    ret
+  ret <4 x i32> <i32 16, i32 32, i32 48, i32 64>
+}
+
+; Negative test, non-sequential.
+define <4 x i32> @v4i32_non_sequential() #0 {
+; CHECK-LABEL: v4i32_non_sequential:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI12_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI12_0]
+; CHECK-NEXT:    ret
+  ret <4 x i32> <i32 0, i32 2, i32 2, i32 3>
+}

>From 44a1040e41e118400f1d347b49a089457f40f314 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj at nvidia.com>
Date: Thu, 17 Oct 2024 09:52:41 -0700
Subject: [PATCH 2/2] [AArch64] Use INDEX for constant Neon step vectors

When compiling for an SVE target, we can use INDEX to materialise constant
fixed-length step vectors instead of loading them from the constant pool.

The logic for this already existed in `LowerBUILD_VECTOR`, but it was only
reachable when NEON was unavailable (`!Subtarget->isNeonAvailable()`). This
patch refactors that check so the SVE lowering path is also taken for
constant step vectors, provided SVE can be used for the vector type.
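
For reference, a rough sketch of how the SVE fixed-length path can form
the sequence once the constant-sequence case is routed to it. This is a
simplified illustration, not the verbatim upstream code: the function
name is made up, while getContainerForFixedLengthVector and
convertFromScalableVector refer to the fixed-length/SVE helpers already
present in AArch64ISelLowering.cpp.

  // Simplified sketch: lower a constant-sequence BUILD_VECTOR via SVE.
  static SDValue lowerConstantSeqToSVE(SDValue Op, SelectionDAG &DAG) {
    EVT VT = Op.getValueType();
    SDLoc DL(Op);

    // isConstantSequence() yields (start, stride) for <a, a+n, a+2n, ...>.
    auto Seq = cast<BuildVectorSDNode>(Op)->isConstantSequence();
    if (!Seq)
      return SDValue();

    // Build the sequence in the scalable container type; the INDEX (plus
    // any ADD/ORR fix-ups for out-of-range immediates) is selected from
    // the resulting STEP_VECTOR + splatted-start DAG.
    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
    EVT EltVT = ContainerVT.getVectorElementType();
    SDValue Steps = DAG.getStepVector(DL, ContainerVT, Seq->second);
    SDValue Start = DAG.getSplatVector(
        ContainerVT, DL, DAG.getConstant(Seq->first, DL, EltVT));
    SDValue Vec = DAG.getNode(ISD::ADD, DL, ContainerVT, Start, Steps);

    // Extract the fixed-length result back out of the scalable register.
    return convertFromScalableVector(DAG, VT, Vec);
  }

With the change below, the constant-sequence case takes this path even
when NEON is available, which is where the `index z0.s, #0, #1`
sequences in the updated tests come from.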
---
 .../Target/AArch64/AArch64ISelLowering.cpp    |  4 +-
 llvm/test/CodeGen/AArch64/active_lane_mask.ll | 20 +++----
 .../AArch64/sve-index-const-step-vector.ll    | 53 ++++++++++---------
 3 files changed, 40 insertions(+), 37 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4aa123b42d1966..e016a905c934bf 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -14512,7 +14512,9 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                  SelectionDAG &DAG) const {
   EVT VT = Op.getValueType();
 
-  if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
+  bool OverrideNEON = !Subtarget->isNeonAvailable() ||
+                      cast<BuildVectorSDNode>(Op)->isConstantSequence();
+  if (useSVEForFixedLengthVectorVT(VT, OverrideNEON))
     return LowerFixedLengthBuildVectorToSVE(Op, DAG);
 
   // Try to build a simple constant vector.
diff --git a/llvm/test/CodeGen/AArch64/active_lane_mask.ll b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
index bd5d076d1ba82e..025bbf749fc71b 100644
--- a/llvm/test/CodeGen/AArch64/active_lane_mask.ll
+++ b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
@@ -430,10 +430,9 @@ define <2 x i1> @lane_mask_v2i1_i64(i64 %index, i64 %TC) {
 define <16 x i1> @lane_mask_v16i1_i8(i8 %index, i8 %TC) {
 ; CHECK-LABEL: lane_mask_v16i1_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI24_0
-; CHECK-NEXT:    dup v0.16b, w0
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI24_0]
-; CHECK-NEXT:    uqadd v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    dup v1.16b, w0
+; CHECK-NEXT:    uqadd v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    dup v1.16b, w1
 ; CHECK-NEXT:    cmhi v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
@@ -444,10 +443,9 @@ define <16 x i1> @lane_mask_v16i1_i8(i8 %index, i8 %TC) {
 define <8 x i1> @lane_mask_v8i1_i8(i8 %index, i8 %TC) {
 ; CHECK-LABEL: lane_mask_v8i1_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    dup v0.8b, w0
-; CHECK-NEXT:    adrp x8, .LCPI25_0
-; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI25_0]
-; CHECK-NEXT:    uqadd v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    dup v1.8b, w0
+; CHECK-NEXT:    uqadd v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    dup v1.8b, w1
 ; CHECK-NEXT:    cmhi v0.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    ret
@@ -459,9 +457,8 @@ define <4 x i1> @lane_mask_v4i1_i8(i8 %index, i8 %TC) {
 ; CHECK-LABEL: lane_mask_v4i1_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    dup v0.4h, w0
-; CHECK-NEXT:    adrp x8, .LCPI26_0
+; CHECK-NEXT:    index z1.h, #0, #1
 ; CHECK-NEXT:    movi d2, #0xff00ff00ff00ff
-; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI26_0]
 ; CHECK-NEXT:    dup v3.4h, w1
 ; CHECK-NEXT:    bic v0.4h, #255, lsl #8
 ; CHECK-NEXT:    bic v3.4h, #255, lsl #8
@@ -478,8 +475,7 @@ define <2 x i1> @lane_mask_v2i1_i8(i8 %index, i8 %TC) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi d0, #0x0000ff000000ff
 ; CHECK-NEXT:    dup v1.2s, w0
-; CHECK-NEXT:    adrp x8, .LCPI27_0
-; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI27_0]
+; CHECK-NEXT:    index z2.s, #0, #1
 ; CHECK-NEXT:    dup v3.2s, w1
 ; CHECK-NEXT:    and v1.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    add v1.2s, v1.2s, v2.2s
diff --git a/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll b/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll
index b0128801d7ef46..433ddbd4a261b2 100644
--- a/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-index-const-step-vector.ll
@@ -6,8 +6,8 @@
 define <16 x i8> @v16i8() #0 {
 ; CHECK-LABEL: v16i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI0_0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>
 }
@@ -15,8 +15,8 @@ define <16 x i8> @v16i8() #0 {
 define <8 x i16> @v8i16() #0 {
 ; CHECK-LABEL: v8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI1_0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT:    index z0.h, #0, #1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
 }
@@ -24,8 +24,8 @@ define <8 x i16> @v8i16() #0 {
 define <4 x i32> @v4i32() #0 {
 ; CHECK-LABEL: v4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI2_0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI2_0]
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <4 x i32> <i32 0, i32 1, i32 2, i32 3>
 }
@@ -33,8 +33,8 @@ define <4 x i32> @v4i32() #0 {
 define <2 x i64> @v2i64() #0 {
 ; CHECK-LABEL: v2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI3_0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    index z0.d, #0, #1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <2 x i64> <i64 0, i64 1>
 }
@@ -44,8 +44,8 @@ define <2 x i64> @v2i64() #0 {
 define <8 x i8> @v8i8() #0 {
 ; CHECK-LABEL: v8i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI4_0
-; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   ret <8 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
 }
@@ -53,8 +53,8 @@ define <8 x i8> @v8i8() #0 {
 define <4 x i16> @v4i16() #0 {
 ; CHECK-LABEL: v4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI5_0
-; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI5_0]
+; CHECK-NEXT:    index z0.h, #0, #1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   ret <4 x i16> <i16 0, i16 1, i16 2, i16 3>
 }
@@ -62,8 +62,8 @@ define <4 x i16> @v4i16() #0 {
 define <2 x i32> @v2i32() #0 {
 ; CHECK-LABEL: v2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI6_0
-; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   ret <2 x i32> <i32 0, i32 1>
 }
@@ -73,8 +73,9 @@ define <2 x i32> @v2i32() #0 {
 define <4 x i32> @v4i32_non_zero_non_one() #0 {
 ; CHECK-LABEL: v4i32_non_zero_non_one:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI7_0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    index z0.s, #0, #2
+; CHECK-NEXT:    orr z0.s, z0.s, #0x1
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <4 x i32> <i32 1, i32 3, i32 5, i32 7>
 }
@@ -83,8 +84,8 @@ define <4 x i32> @v4i32_non_zero_non_one() #0 {
 define <4 x i32> @v4i32_neg_immediates() #0 {
 ; CHECK-LABEL: v4i32_neg_immediates:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI8_0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI8_0]
+; CHECK-NEXT:    index z0.s, #-1, #-2
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <4 x i32> <i32 -1, i32 -3, i32 -5, i32 -7>
 }
@@ -93,8 +94,9 @@ define <4 x i32> @v4i32_neg_immediates() #0 {
 define <4 x i32> @v4i32_out_range_start() #0 {
 ; CHECK-LABEL: v4i32_out_range_start:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI9_0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI9_0]
+; CHECK-NEXT:    index z0.s, #0, #1
+; CHECK-NEXT:    add z0.s, z0.s, #16 // =0x10
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <4 x i32> <i32 16, i32 17, i32 18, i32 19>
 }
@@ -103,8 +105,9 @@ define <4 x i32> @v4i32_out_range_start() #0 {
 define <4 x i32> @v4i32_out_range_step() #0 {
 ; CHECK-LABEL: v4i32_out_range_step:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI10_0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI10_0]
+; CHECK-NEXT:    mov w8, #16 // =0x10
+; CHECK-NEXT:    index z0.s, #0, w8
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <4 x i32> <i32 0, i32 16, i32 32, i32 48>
 }
@@ -113,8 +116,10 @@ define <4 x i32> @v4i32_out_range_step() #0 {
 define <4 x i32> @v4i32_out_range_start_step() #0 {
 ; CHECK-LABEL: v4i32_out_range_start_step:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI11_0
-; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI11_0]
+; CHECK-NEXT:    mov w8, #16 // =0x10
+; CHECK-NEXT:    index z0.s, #0, w8
+; CHECK-NEXT:    add z0.s, z0.s, #16 // =0x10
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   ret <4 x i32> <i32 16, i32 32, i32 48, i32 64>
 }


