[llvm] [AArch64][SVE] Lower unpredicated loads/stores as fixed LDR/STR with -msve-vector-bits=128. (PR #127500)

Ricardo Jesus via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 17 07:48:16 PST 2025


https://github.com/rj-jesus updated https://github.com/llvm/llvm-project/pull/127500

>From b610359688d753463752bf4ab75f044db515e1ab Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj at nvidia.com>
Date: Fri, 14 Feb 2025 05:37:21 -0800
Subject: [PATCH 1/3] Precommit unpredicated loads/stores tests

---
 .../AArch64/sve-unpred-loads-stores.ll        | 497 ++++++++++++++++++
 1 file changed, 497 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll

diff --git a/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll b/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll
new file mode 100644
index 0000000000000..3170464033e19
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll
@@ -0,0 +1,497 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -aarch64-sve-vector-bits-max=0   < %s | FileCheck %s --check-prefix=CHECK-VLA
+; RUN: llc -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefix=CHECK-128
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define <vscale x 16 x i8> @ld_nxv16i8(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv16i8:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.b
+; CHECK-VLA-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv16i8:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.b
+; CHECK-128-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-128-NEXT:    ret
+  %2 = load <vscale x 16 x i8>, ptr %0, align 16
+  ret <vscale x 16 x i8> %2
+}
+
+define void @st_nxv16i8(ptr %0, <vscale x 16 x i8> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv16i8:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.b
+; CHECK-VLA-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv16i8:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.b
+; CHECK-128-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-128-NEXT:    ret
+  store <vscale x 16 x i8> %1, ptr %0, align 16
+  ret void
+}
+
+define <vscale x 8 x i16> @ld_nxv8i16(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv8i16:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.h
+; CHECK-VLA-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv8i16:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.h
+; CHECK-128-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-128-NEXT:    ret
+  %2 = load <vscale x 8 x i16>, ptr %0, align 16
+  ret <vscale x 8 x i16> %2
+}
+
+define void @st_nxv8i16(ptr %0, <vscale x 8 x i16> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv8i16:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.h
+; CHECK-VLA-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv8i16:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.h
+; CHECK-128-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-128-NEXT:    ret
+  store <vscale x 8 x i16> %1, ptr %0, align 16
+  ret void
+}
+
+define <vscale x 4 x i32> @ld_nxv4i32(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv4i32:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.s
+; CHECK-VLA-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv4i32:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.s
+; CHECK-128-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-128-NEXT:    ret
+  %2 = load <vscale x 4 x i32>, ptr %0, align 16
+  ret <vscale x 4 x i32> %2
+}
+
+define void @st_nxv4i32(ptr %0, <vscale x 4 x i32> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv4i32:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.s
+; CHECK-VLA-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv4i32:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.s
+; CHECK-128-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-128-NEXT:    ret
+  store <vscale x 4 x i32> %1, ptr %0, align 16
+  ret void
+}
+
+define <vscale x 2 x i64> @ld_nxv2i64(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv2i64:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.d
+; CHECK-VLA-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv2i64:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.d
+; CHECK-128-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-128-NEXT:    ret
+  %2 = load <vscale x 2 x i64>, ptr %0, align 16
+  ret <vscale x 2 x i64> %2
+}
+
+define void @st_nxv2i64(ptr %0, <vscale x 2 x i64> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv2i64:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.d
+; CHECK-VLA-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv2i64:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.d
+; CHECK-128-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-128-NEXT:    ret
+  store <vscale x 2 x i64> %1, ptr %0, align 16
+  ret void
+}
+
+define <vscale x 8 x half> @ld_nxv8f16(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv8f16:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.h
+; CHECK-VLA-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv8f16:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.h
+; CHECK-128-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-128-NEXT:    ret
+  %2 = load <vscale x 8 x half>, ptr %0, align 16
+  ret <vscale x 8 x half> %2
+}
+
+define void @st_nxv8f16(ptr %0, <vscale x 8 x half> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv8f16:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.h
+; CHECK-VLA-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv8f16:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.h
+; CHECK-128-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-128-NEXT:    ret
+  store <vscale x 8 x half> %1, ptr %0, align 16
+  ret void
+}
+
+define <vscale x 4 x float> @ld_nxv4f32(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv4f32:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.s
+; CHECK-VLA-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv4f32:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.s
+; CHECK-128-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-128-NEXT:    ret
+  %2 = load <vscale x 4 x float>, ptr %0, align 16
+  ret <vscale x 4 x float> %2
+}
+
+define void @st_nxv4f32(ptr %0, <vscale x 4 x float> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv4f32:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.s
+; CHECK-VLA-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv4f32:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.s
+; CHECK-128-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-128-NEXT:    ret
+  store <vscale x 4 x float> %1, ptr %0, align 16
+  ret void
+}
+
+define <vscale x 2 x double> @ld_nxv2f64(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv2f64:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.d
+; CHECK-VLA-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv2f64:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.d
+; CHECK-128-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-128-NEXT:    ret
+  %2 = load <vscale x 2 x double>, ptr %0, align 16
+  ret <vscale x 2 x double> %2
+}
+
+define void @st_nxv2f64(ptr %0, <vscale x 2 x double> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv2f64:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.d
+; CHECK-VLA-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv2f64:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.d
+; CHECK-128-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-128-NEXT:    ret
+  store <vscale x 2 x double> %1, ptr %0, align 16
+  ret void
+}
+
+define <vscale x 16 x i8> @ld_nxv16i8_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv16i8_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.b
+; CHECK-VLA-NEXT:    ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv16i8_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.b
+; CHECK-128-NEXT:    ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %2 = tail call i64 @llvm.vscale.i64()
+  %3 = shl nuw nsw i64 %2, 4
+  %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+  %5 = load <vscale x 16 x i8>, ptr %4, align 16
+  ret <vscale x 16 x i8> %5
+}
+
+define void @st_nxv16i8_offset(ptr %0, <vscale x 16 x i8> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv16i8_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.b
+; CHECK-VLA-NEXT:    st1b { z0.b }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv16i8_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.b
+; CHECK-128-NEXT:    st1b { z0.b }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %3 = tail call i64 @llvm.vscale.i64()
+  %4 = shl nuw nsw i64 %3, 4
+  %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+  store <vscale x 16 x i8> %1, ptr %5, align 16
+  ret void
+}
+
+define <vscale x 8 x i16> @ld_nxv8i16_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv8i16_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.h
+; CHECK-VLA-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv8i16_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.h
+; CHECK-128-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %2 = tail call i64 @llvm.vscale.i64()
+  %3 = shl nuw nsw i64 %2, 4
+  %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+  %5 = load <vscale x 8 x i16>, ptr %4, align 16
+  ret <vscale x 8 x i16> %5
+}
+
+define void @st_nxv8i16_offset(ptr %0, <vscale x 8 x i16> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv8i16_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.h
+; CHECK-VLA-NEXT:    st1h { z0.h }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv8i16_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.h
+; CHECK-128-NEXT:    st1h { z0.h }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %3 = tail call i64 @llvm.vscale.i64()
+  %4 = shl nuw nsw i64 %3, 4
+  %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+  store <vscale x 8 x i16> %1, ptr %5, align 16
+  ret void
+}
+
+define <vscale x 4 x i32> @ld_nxv4i32_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv4i32_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.s
+; CHECK-VLA-NEXT:    ld1w { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv4i32_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.s
+; CHECK-128-NEXT:    ld1w { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %2 = tail call i64 @llvm.vscale.i64()
+  %3 = shl nuw nsw i64 %2, 4
+  %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+  %5 = load <vscale x 4 x i32>, ptr %4, align 16
+  ret <vscale x 4 x i32> %5
+}
+
+define void @st_nxv4i32_offset(ptr %0, <vscale x 4 x i32> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv4i32_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.s
+; CHECK-VLA-NEXT:    st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv4i32_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.s
+; CHECK-128-NEXT:    st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %3 = tail call i64 @llvm.vscale.i64()
+  %4 = shl nuw nsw i64 %3, 4
+  %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+  store <vscale x 4 x i32> %1, ptr %5, align 16
+  ret void
+}
+
+define <vscale x 2 x i64> @ld_nxv2i64_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv2i64_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.d
+; CHECK-VLA-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv2i64_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.d
+; CHECK-128-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %2 = tail call i64 @llvm.vscale.i64()
+  %3 = shl nuw nsw i64 %2, 4
+  %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+  %5 = load <vscale x 2 x i64>, ptr %4, align 16
+  ret <vscale x 2 x i64> %5
+}
+
+define void @st_nxv2i64_offset(ptr %0, <vscale x 2 x i64> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv2i64_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.d
+; CHECK-VLA-NEXT:    st1d { z0.d }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv2i64_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.d
+; CHECK-128-NEXT:    st1d { z0.d }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %3 = tail call i64 @llvm.vscale.i64()
+  %4 = shl nuw nsw i64 %3, 4
+  %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+  store <vscale x 2 x i64> %1, ptr %5, align 16
+  ret void
+}
+
+define <vscale x 8 x half> @ld_nxv8f16_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv8f16_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.h
+; CHECK-VLA-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv8f16_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.h
+; CHECK-128-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %2 = tail call i64 @llvm.vscale.i64()
+  %3 = shl nuw nsw i64 %2, 4
+  %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+  %5 = load <vscale x 8 x half>, ptr %4, align 16
+  ret <vscale x 8 x half> %5
+}
+
+define void @st_nxv8f16_offset(ptr %0, <vscale x 8 x half> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv8f16_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.h
+; CHECK-VLA-NEXT:    st1h { z0.h }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv8f16_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.h
+; CHECK-128-NEXT:    st1h { z0.h }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %3 = tail call i64 @llvm.vscale.i64()
+  %4 = shl nuw nsw i64 %3, 4
+  %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+  store <vscale x 8 x half> %1, ptr %5, align 16
+  ret void
+}
+
+define <vscale x 4 x float> @ld_nxv4f32_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv4f32_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.s
+; CHECK-VLA-NEXT:    ld1w { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv4f32_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.s
+; CHECK-128-NEXT:    ld1w { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %2 = tail call i64 @llvm.vscale.i64()
+  %3 = shl nuw nsw i64 %2, 4
+  %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+  %5 = load <vscale x 4 x float>, ptr %4, align 16
+  ret <vscale x 4 x float> %5
+}
+
+define void @st_nxv4f32_offset(ptr %0, <vscale x 4 x float> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv4f32_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.s
+; CHECK-VLA-NEXT:    st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv4f32_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.s
+; CHECK-128-NEXT:    st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %3 = tail call i64 @llvm.vscale.i64()
+  %4 = shl nuw nsw i64 %3, 4
+  %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+  store <vscale x 4 x float> %1, ptr %5, align 16
+  ret void
+}
+
+define <vscale x 2 x double> @ld_nxv2f64_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv2f64_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.d
+; CHECK-VLA-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: ld_nxv2f64_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.d
+; CHECK-128-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %2 = tail call i64 @llvm.vscale.i64()
+  %3 = shl nuw nsw i64 %2, 4
+  %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+  %5 = load <vscale x 2 x double>, ptr %4, align 16
+  ret <vscale x 2 x double> %5
+}
+
+define void @st_nxv2f64_offset(ptr %0, <vscale x 2 x double> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv2f64_offset:
+; CHECK-VLA:       // %bb.0:
+; CHECK-VLA-NEXT:    ptrue p0.d
+; CHECK-VLA-NEXT:    st1d { z0.d }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT:    ret
+;
+; CHECK-128-LABEL: st_nxv2f64_offset:
+; CHECK-128:       // %bb.0:
+; CHECK-128-NEXT:    ptrue p0.d
+; CHECK-128-NEXT:    st1d { z0.d }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    ret
+  %3 = tail call i64 @llvm.vscale.i64()
+  %4 = shl nuw nsw i64 %3, 4
+  %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+  store <vscale x 2 x double> %1, ptr %5, align 16
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }

>From 01bdff56bf04d3feb0784f5f9e2c83743c62da3d Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj at nvidia.com>
Date: Fri, 14 Feb 2025 05:37:32 -0800
Subject: [PATCH 2/3] [AArch64][SVE] Lower unpredicated loads/stores as LDR/STR
 with sve-vector-bits=128.

Given the code below:
```cpp
svuint8_t foo(uint8_t *x) {
  return svld1(svptrue_b8(), x);
}
```
When compiled with -msve-vector-bits=128 (or vscale_range(1, 1)), we
currently generate:
```gas
foo:
  ptrue   p0.b
  ld1b    { z0.b }, p0/z, [x0]
  ret
```
Whereas on little-endian targets we could instead use LDR, as follows:
```gas
foo:
  ldr     q0, [x0]
  ret
```

Besides removing the dependency on the predicate register, the LDR form
enables further optimisations such as LDP folds. Stores are handled
likewise, as sketched below.
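For illustration, the store counterpart of the example above (hypothetical
function name, assuming the same -msve-vector-bits=128 setup):
```cpp
#include <arm_sve.h>

// Hypothetical store counterpart of foo() above.
void store_foo(uint8_t *x, svuint8_t v) {
  svst1(svptrue_b8(), x, v);
}
```
Per the updated CHECK-128 lines below, this should now lower to:
```gas
store_foo:
  str     q0, [x0]
  ret
```
instead of the previous ptrue/st1b pair.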
---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 55 +++++++++++
 .../AArch64/sve-unpred-loads-stores.ll        | 98 ++++++++-----------
 2 files changed, 97 insertions(+), 56 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4263be1098899..173a875a256e0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -23550,6 +23550,31 @@ static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) {
   return DAG.getMergeValues({Extract, TokenFactor}, DL);
 }
 
+// Replace scalable loads with fixed loads when vscale_range(1, 1).
+// This enables further optimisations such as LDP folds.
+static SDValue combineVScale1Load(LoadSDNode *LD, SelectionDAG &DAG,
+                                  const AArch64Subtarget *Subtarget) {
+  EVT MemVT = LD->getMemoryVT();
+  if (!MemVT.isScalableVector() ||
+      Subtarget->getMaxSVEVectorSizeInBits() != AArch64::SVEBitsPerBlock)
+    return SDValue();
+
+  // Skip unpacked types given their different layouts between Neon and SVE.
+  if (MemVT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock)
+    return SDValue();
+
+  SDLoc DL(LD);
+  MVT NewVT = MVT::getVectorVT(MemVT.getVectorElementType().getSimpleVT(),
+                               MemVT.getVectorMinNumElements());
+  SDValue NewLoad = DAG.getLoad(
+      NewVT, DL, LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
+      LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), LD->getAAInfo());
+  SDValue Insert = convertToScalableVector(DAG, MemVT, NewLoad);
+  SDValue TokenFactor = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+                                    {SDValue(cast<SDNode>(NewLoad), 1)});
+  return DAG.getMergeValues({Insert, TokenFactor}, DL);
+}
+
 // Perform TBI simplification if supported by the target and try to break up
 // nontemporal loads larger than 256-bits loads for odd types so LDNPQ 256-bit
 // load instructions can be selected.
@@ -23587,6 +23612,9 @@ static SDValue performLOADCombine(SDNode *N,
   if (SDValue Res = combineV3I8LoadExt(LD, DAG))
     return Res;
 
+  if (SDValue Res = combineVScale1Load(LD, DAG, Subtarget))
+    return Res;
+
   if (!LD->isNonTemporal())
     return SDValue(N, 0);
 
@@ -23845,6 +23873,30 @@ static SDValue combineI8TruncStore(StoreSDNode *ST, SelectionDAG &DAG,
   return Chain;
 }
 
+// Replace scalable stores with fixed stores when vscale_range(1, 1).
+static SDValue combineVScale1Store(StoreSDNode *ST, SelectionDAG &DAG,
+                                   const AArch64Subtarget *Subtarget) {
+  SDValue Value = ST->getValue();
+  EVT ValueVT = Value.getValueType();
+  if (ST->isVolatile() || !Subtarget->isLittleEndian() ||
+      !ValueVT.isScalableVector() ||
+      Subtarget->getMaxSVEVectorSizeInBits() != AArch64::SVEBitsPerBlock)
+    return SDValue();
+
+  // Skip unpacked types given their different layouts between Neon and SVE.
+  if (ValueVT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock)
+    return SDValue();
+
+  SDLoc DL(ST);
+  MVT NewVT = MVT::getVectorVT(ValueVT.getVectorElementType().getSimpleVT(),
+                               ValueVT.getVectorMinNumElements());
+  SDValue NewValue = convertFromScalableVector(DAG, NewVT, Value);
+  SDValue NewStore = DAG.getStore(
+      ST->getChain(), DL, NewValue, ST->getBasePtr(), ST->getPointerInfo(),
+      ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), ST->getAAInfo());
+  return NewStore;
+}
+
 static SDValue performSTORECombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    SelectionDAG &DAG,
@@ -23879,6 +23931,9 @@ static SDValue performSTORECombine(SDNode *N,
   if (SDValue Res = combineI8TruncStore(ST, DAG, Subtarget))
     return Res;
 
+  if (SDValue Res = combineVScale1Store(ST, DAG, Subtarget))
+    return Res;
+
   // If this is an FP_ROUND followed by a store, fold this into a truncating
   // store. We can do this even if this is already a truncstore.
   // We purposefully don't care about legality of the nodes here as we know
diff --git a/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll b/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll
index 3170464033e19..f2d4933d43259 100644
--- a/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll
@@ -13,8 +13,7 @@ define <vscale x 16 x i8> @ld_nxv16i8(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv16i8:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.b
-; CHECK-128-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-128-NEXT:    ldr q0, [x0]
 ; CHECK-128-NEXT:    ret
   %2 = load <vscale x 16 x i8>, ptr %0, align 16
   ret <vscale x 16 x i8> %2
@@ -29,8 +28,7 @@ define void @st_nxv16i8(ptr %0, <vscale x 16 x i8> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv16i8:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.b
-; CHECK-128-NEXT:    st1b { z0.b }, p0, [x0]
+; CHECK-128-NEXT:    str q0, [x0]
 ; CHECK-128-NEXT:    ret
   store <vscale x 16 x i8> %1, ptr %0, align 16
   ret void
@@ -45,8 +43,7 @@ define <vscale x 8 x i16> @ld_nxv8i16(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv8i16:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.h
-; CHECK-128-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-128-NEXT:    ldr q0, [x0]
 ; CHECK-128-NEXT:    ret
   %2 = load <vscale x 8 x i16>, ptr %0, align 16
   ret <vscale x 8 x i16> %2
@@ -61,8 +58,7 @@ define void @st_nxv8i16(ptr %0, <vscale x 8 x i16> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv8i16:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.h
-; CHECK-128-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-128-NEXT:    str q0, [x0]
 ; CHECK-128-NEXT:    ret
   store <vscale x 8 x i16> %1, ptr %0, align 16
   ret void
@@ -77,8 +73,7 @@ define <vscale x 4 x i32> @ld_nxv4i32(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv4i32:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.s
-; CHECK-128-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-128-NEXT:    ldr q0, [x0]
 ; CHECK-128-NEXT:    ret
   %2 = load <vscale x 4 x i32>, ptr %0, align 16
   ret <vscale x 4 x i32> %2
@@ -93,8 +88,7 @@ define void @st_nxv4i32(ptr %0, <vscale x 4 x i32> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv4i32:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.s
-; CHECK-128-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-128-NEXT:    str q0, [x0]
 ; CHECK-128-NEXT:    ret
   store <vscale x 4 x i32> %1, ptr %0, align 16
   ret void
@@ -109,8 +103,7 @@ define <vscale x 2 x i64> @ld_nxv2i64(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv2i64:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.d
-; CHECK-128-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-128-NEXT:    ldr q0, [x0]
 ; CHECK-128-NEXT:    ret
   %2 = load <vscale x 2 x i64>, ptr %0, align 16
   ret <vscale x 2 x i64> %2
@@ -125,8 +118,7 @@ define void @st_nxv2i64(ptr %0, <vscale x 2 x i64> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv2i64:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.d
-; CHECK-128-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-128-NEXT:    str q0, [x0]
 ; CHECK-128-NEXT:    ret
   store <vscale x 2 x i64> %1, ptr %0, align 16
   ret void
@@ -141,8 +133,7 @@ define <vscale x 8 x half> @ld_nxv8f16(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv8f16:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.h
-; CHECK-128-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-128-NEXT:    ldr q0, [x0]
 ; CHECK-128-NEXT:    ret
   %2 = load <vscale x 8 x half>, ptr %0, align 16
   ret <vscale x 8 x half> %2
@@ -157,8 +148,7 @@ define void @st_nxv8f16(ptr %0, <vscale x 8 x half> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv8f16:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.h
-; CHECK-128-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-128-NEXT:    str q0, [x0]
 ; CHECK-128-NEXT:    ret
   store <vscale x 8 x half> %1, ptr %0, align 16
   ret void
@@ -173,8 +163,7 @@ define <vscale x 4 x float> @ld_nxv4f32(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv4f32:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.s
-; CHECK-128-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-128-NEXT:    ldr q0, [x0]
 ; CHECK-128-NEXT:    ret
   %2 = load <vscale x 4 x float>, ptr %0, align 16
   ret <vscale x 4 x float> %2
@@ -189,8 +178,7 @@ define void @st_nxv4f32(ptr %0, <vscale x 4 x float> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv4f32:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.s
-; CHECK-128-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-128-NEXT:    str q0, [x0]
 ; CHECK-128-NEXT:    ret
   store <vscale x 4 x float> %1, ptr %0, align 16
   ret void
@@ -205,8 +193,7 @@ define <vscale x 2 x double> @ld_nxv2f64(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv2f64:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.d
-; CHECK-128-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-128-NEXT:    ldr q0, [x0]
 ; CHECK-128-NEXT:    ret
   %2 = load <vscale x 2 x double>, ptr %0, align 16
   ret <vscale x 2 x double> %2
@@ -221,8 +208,7 @@ define void @st_nxv2f64(ptr %0, <vscale x 2 x double> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv2f64:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.d
-; CHECK-128-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-128-NEXT:    str q0, [x0]
 ; CHECK-128-NEXT:    ret
   store <vscale x 2 x double> %1, ptr %0, align 16
   ret void
@@ -237,8 +223,8 @@ define <vscale x 16 x i8> @ld_nxv16i8_offset(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv16i8_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.b
-; CHECK-128-NEXT:    ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    ldr q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %2 = tail call i64 @llvm.vscale.i64()
   %3 = shl nuw nsw i64 %2, 4
@@ -256,8 +242,8 @@ define void @st_nxv16i8_offset(ptr %0, <vscale x 16 x i8> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv16i8_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.b
-; CHECK-128-NEXT:    st1b { z0.b }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    str q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %3 = tail call i64 @llvm.vscale.i64()
   %4 = shl nuw nsw i64 %3, 4
@@ -275,8 +261,8 @@ define <vscale x 8 x i16> @ld_nxv8i16_offset(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv8i16_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.h
-; CHECK-128-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    ldr q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %2 = tail call i64 @llvm.vscale.i64()
   %3 = shl nuw nsw i64 %2, 4
@@ -294,8 +280,8 @@ define void @st_nxv8i16_offset(ptr %0, <vscale x 8 x i16> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv8i16_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.h
-; CHECK-128-NEXT:    st1h { z0.h }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    str q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %3 = tail call i64 @llvm.vscale.i64()
   %4 = shl nuw nsw i64 %3, 4
@@ -313,8 +299,8 @@ define <vscale x 4 x i32> @ld_nxv4i32_offset(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv4i32_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.s
-; CHECK-128-NEXT:    ld1w { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    ldr q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %2 = tail call i64 @llvm.vscale.i64()
   %3 = shl nuw nsw i64 %2, 4
@@ -332,8 +318,8 @@ define void @st_nxv4i32_offset(ptr %0, <vscale x 4 x i32> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv4i32_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.s
-; CHECK-128-NEXT:    st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    str q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %3 = tail call i64 @llvm.vscale.i64()
   %4 = shl nuw nsw i64 %3, 4
@@ -351,8 +337,8 @@ define <vscale x 2 x i64> @ld_nxv2i64_offset(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv2i64_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.d
-; CHECK-128-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    ldr q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %2 = tail call i64 @llvm.vscale.i64()
   %3 = shl nuw nsw i64 %2, 4
@@ -370,8 +356,8 @@ define void @st_nxv2i64_offset(ptr %0, <vscale x 2 x i64> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv2i64_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.d
-; CHECK-128-NEXT:    st1d { z0.d }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    str q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %3 = tail call i64 @llvm.vscale.i64()
   %4 = shl nuw nsw i64 %3, 4
@@ -389,8 +375,8 @@ define <vscale x 8 x half> @ld_nxv8f16_offset(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv8f16_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.h
-; CHECK-128-NEXT:    ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    ldr q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %2 = tail call i64 @llvm.vscale.i64()
   %3 = shl nuw nsw i64 %2, 4
@@ -408,8 +394,8 @@ define void @st_nxv8f16_offset(ptr %0, <vscale x 8 x half> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv8f16_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.h
-; CHECK-128-NEXT:    st1h { z0.h }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    str q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %3 = tail call i64 @llvm.vscale.i64()
   %4 = shl nuw nsw i64 %3, 4
@@ -427,8 +413,8 @@ define <vscale x 4 x float> @ld_nxv4f32_offset(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv4f32_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.s
-; CHECK-128-NEXT:    ld1w { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    ldr q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %2 = tail call i64 @llvm.vscale.i64()
   %3 = shl nuw nsw i64 %2, 4
@@ -446,8 +432,8 @@ define void @st_nxv4f32_offset(ptr %0, <vscale x 4 x float> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv4f32_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.s
-; CHECK-128-NEXT:    st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    str q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %3 = tail call i64 @llvm.vscale.i64()
   %4 = shl nuw nsw i64 %3, 4
@@ -465,8 +451,8 @@ define <vscale x 2 x double> @ld_nxv2f64_offset(ptr %0) #0 {
 ;
 ; CHECK-128-LABEL: ld_nxv2f64_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.d
-; CHECK-128-NEXT:    ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    ldr q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %2 = tail call i64 @llvm.vscale.i64()
   %3 = shl nuw nsw i64 %2, 4
@@ -484,8 +470,8 @@ define void @st_nxv2f64_offset(ptr %0, <vscale x 2 x double> %1) #0 {
 ;
 ; CHECK-128-LABEL: st_nxv2f64_offset:
 ; CHECK-128:       // %bb.0:
-; CHECK-128-NEXT:    ptrue p0.d
-; CHECK-128-NEXT:    st1d { z0.d }, p0, [x0, #1, mul vl]
+; CHECK-128-NEXT:    rdvl x8, #1
+; CHECK-128-NEXT:    str q0, [x0, x8]
 ; CHECK-128-NEXT:    ret
   %3 = tail call i64 @llvm.vscale.i64()
   %4 = shl nuw nsw i64 %3, 4

>From 90802ec069a88ebce7de370658093ed919480fb8 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj at nvidia.com>
Date: Mon, 17 Feb 2025 07:12:35 -0800
Subject: [PATCH 3/3] Ensure Neon is available

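The combines now bail out when the subtarget reports Neon as unavailable,
since the rewrite goes through fixed 128-bit (Neon-sized) vector types.
For illustration (a hypothetical example, assuming isNeonAvailable() is
false in streaming mode), a case that should keep the predicated form:
```cpp
#include <arm_sve.h>

// Hypothetical example: even with -msve-vector-bits=128, this should keep
// the predicated ld1b form, because the combine does not fire when Neon is
// unavailable, as in streaming mode.
// (Compiling this also requires SME; it is shown only to illustrate the guard.)
svuint8_t foo_streaming(uint8_t *x) __arm_streaming {
  return svld1(svptrue_b8(), x);
}
```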
---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 173a875a256e0..cd5ccfba9701d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -23555,7 +23555,7 @@ static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) {
 static SDValue combineVScale1Load(LoadSDNode *LD, SelectionDAG &DAG,
                                   const AArch64Subtarget *Subtarget) {
   EVT MemVT = LD->getMemoryVT();
-  if (!MemVT.isScalableVector() ||
+  if (!Subtarget->isNeonAvailable() || !MemVT.isScalableVector() ||
       Subtarget->getMaxSVEVectorSizeInBits() != AArch64::SVEBitsPerBlock)
     return SDValue();
 
@@ -23879,7 +23879,7 @@ static SDValue combineVScale1Store(StoreSDNode *ST, SelectionDAG &DAG,
   SDValue Value = ST->getValue();
   EVT ValueVT = Value.getValueType();
   if (ST->isVolatile() || !Subtarget->isLittleEndian() ||
-      !ValueVT.isScalableVector() ||
+      !Subtarget->isNeonAvailable() || !ValueVT.isScalableVector() ||
       Subtarget->getMaxSVEVectorSizeInBits() != AArch64::SVEBitsPerBlock)
     return SDValue();
 
