[clang] [llvm] [AArch64] Add quadword gather load/scatter store intrinsics with unscaled vector offset (PR #71290)

via cfe-commits cfe-commits at lists.llvm.org
Tue Nov 21 03:19:31 PST 2023


================
@@ -0,0 +1,249 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1,+bf16 < %s | FileCheck %s
+
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 1 x i1>, <vscale x 2 x i64>, i64)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv4i32.nxv2i64(<vscale x 1 x i1>, <vscale x 2 x i64>, i64)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv8i16.nxv2i64(<vscale x 1 x i1>, <vscale x 2 x i64>, i64)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv16i8.nxv2i64(<vscale x 1 x i1>, <vscale x 2 x i64>, i64)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 1 x i1>, <vscale x 2 x i64>, i64)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv4f32.nxv2i64(<vscale x 1 x i1>, <vscale x 2 x i64>, i64)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv8f16.nxv2i64(<vscale x 1 x i1>, <vscale x 2 x i64>, i64)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv8bf16.nxv2i64(<vscale x 1 x i1>, <vscale x 2 x i64>, i64)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1q.gather.index.nxv4i32(<vscale x 1 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1q.gather.index.nxv8i16(<vscale x 1 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1q.gather.index.nxv2i64(<vscale x 1 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1q.gather.index.nxv8bf16(<vscale x 1 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1q.gather.index.nxv8f16(<vscale x 1 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1q.gather.index.nxv4f32(<vscale x 1 x i1>, ptr, <vscale x 2 x i64>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1q.gather.index.nxv2f64(<vscale x 1 x i1>, ptr, <vscale x 2 x i64>)
+
+define <vscale x 8 x i16> @test_svld1q_gather_u64index_s16(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_s16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #1
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.ld1q.gather.index.nxv8i16(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 8 x i16> %0
+}
+
+define <vscale x 8 x i16> @test_svld1q_gather_u64index_u16(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_u16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #1
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.ld1q.gather.index.nxv8i16(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 8 x i16> %0
+}
+
+define <vscale x 4 x i32> @test_svld1q_gather_u64index_s32(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_s32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #2
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ld1q.gather.index.nxv4i32(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 4 x i32> %0
+}
+
+define <vscale x 4 x i32> @test_svld1q_gather_u64index_u32(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_u32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #2
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ld1q.gather.index.nxv4i32(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 4 x i32> %0
+}
+
+define <vscale x 2 x i64> @test_svld1q_gather_u64index_s64(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_s64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #3
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ld1q.gather.index.nxv2i64(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 2 x i64> %0
+}
+
+define <vscale x 2 x i64> @test_svld1q_gather_u64index_u64(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_u64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #3
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ld1q.gather.index.nxv2i64(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 2 x i64> %0
+}
+
+define <vscale x 8 x bfloat> @test_svld1q_gather_u64index_bf16(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #1
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.ld1q.gather.index.nxv8bf16(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 8 x bfloat> %0
+}
+
+define <vscale x 8 x half> @test_svld1q_gather_u64index_f16(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #1
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x half> @llvm.aarch64.sve.ld1q.gather.index.nxv8f16(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 8 x half> %0
+}
+
+define <vscale x 4 x float> @test_svld1q_gather_u64index_f32(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_f32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #2
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> @llvm.aarch64.sve.ld1q.gather.index.nxv4f32(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 4 x float> %0
+}
+
+define <vscale x 2 x double> @test_svld1q_gather_u64index_f64(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx) {
+; CHECK-LABEL: test_svld1q_gather_u64index_f64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsl z0.d, z0.d, #3
+; CHECK-NEXT:    ld1q { z0.q }, p0/z, [z0.d, x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x double> @llvm.aarch64.sve.ld1q.gather.index.nxv2f64(<vscale x 1 x i1> %pg, ptr %base, <vscale x 2 x i64> %idx)
+  ret <vscale x 2 x double> %0
+}
+
+define <vscale x 8 x i16> @test_svld1q_gather_u64base_index_s16(<vscale x 1 x i1> %pg, <vscale x 2 x i64> %base, i64 %idx) {
----------------
CarolineConcatto wrote:

I am not sure we need the tests for u64base_index.
We already test this LLVM IR, llvm.aarch64.sve.ld1q.gather.scalar.offset, in [sve2p1-intrinsics-gather-loads-128bit-unscaled-offset.ll](https://github.com/llvm/llvm-project/pull/71290/files#diff-4b14c330b3e12acf3e0886be41fcd53bbc4dcdd8970acc9e9d57e61676aa17d3).
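For context, a minimal sketch of the kind of coverage that file already provides, assuming the ld1q.gather.scalar.offset signature declared at the top of this test (the function and value names here are illustrative only):

```llvm
; Hedged sketch of the scalar.offset form exercised in the
; unscaled-offset test file; the intrinsic signature is copied from the
; declarations at the top of this test, the rest is illustrative.
declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv8i16.nxv2i64(<vscale x 1 x i1>, <vscale x 2 x i64>, i64)

define <vscale x 8 x i16> @sketch_ld1q_gather_u64base_offset_s16(<vscale x 1 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
entry:
  ; Gather using a vector of 64-bit base addresses plus a scalar byte offset.
  %load = tail call <vscale x 8 x i16> @llvm.aarch64.sve.ld1q.gather.scalar.offset.nxv8i16.nxv2i64(<vscale x 1 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset)
  ret <vscale x 8 x i16> %load
}
```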

https://github.com/llvm/llvm-project/pull/71290

