[llvm] [AArch64][SVE] Lower unpredicated loads/stores as fixed LDR/STR with -msve-vector-bits=128. (PR #127500)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 17 06:47:40 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-aarch64
Author: Ricardo Jesus (rj-jesus)
Changes:
Given the code below:
```cpp
svuint8_t foo(uint8_t *x) {
return svld1(svptrue_b8(), x);
}
```
When compiled with -msve-vector-bits=128 (or vscale_range(1, 1)), we
currently generate:
```gas
foo:
ptrue p0.b
ld1b { z0.b }, p0/z, [x0]
ret
```
Whereas, on little-endian targets, we could instead use LDR:
```gas
foo:
ldr q0, [x0]
ret
```
Besides avoiding the predicate dependency, the above form enables
further optimisations such as LDP folds. Likewise for other types and
stores.
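As a hypothetical illustration (this function is not part of the patch or its tests; the name and register assignments are assumptions), two adjacent unpredicated loads compiled with -msve-vector-bits=128 would each lower to a fixed 16-byte LDR, giving the load/store-pair optimiser a chance to merge them into a single LDP:
```cpp
// Hypothetical example, not taken from the PR: with -msve-vector-bits=128
// (vscale == 1), svcntb() is 16, so the two loads read adjacent 16-byte
// blocks. With this patch each load can lower to a plain LDR, and the two
// LDRs at [x0] and [x0, #16] become candidates for an LDP fold.
#include <arm_sve.h>

svuint8_t add_adjacent_blocks(uint8_t *x) {
  svbool_t pg = svptrue_b8();
  svuint8_t lo = svld1(pg, x);       // expected: ldr q0, [x0]
  svuint8_t hi = svld1(pg, x + 16);  // expected: ldr q1, [x0, #16]
  return svadd_x(pg, lo, hi);
}
```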
I have a patch that enables similar folds for SVE LDR/STR, but since it
causes a fair number of test changes I'd rather open a separate PR for it.
---
Full diff: https://github.com/llvm/llvm-project/pull/127500.diff
2 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+55)
- (added) llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll (+483)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4263be1098899..173a875a256e0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -23550,6 +23550,31 @@ static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) {
return DAG.getMergeValues({Extract, TokenFactor}, DL);
}
+// Replace scalable loads with fixed loads when vscale_range(1, 1).
+// This enables further optimisations such as LDP folds.
+static SDValue combineVScale1Load(LoadSDNode *LD, SelectionDAG &DAG,
+ const AArch64Subtarget *Subtarget) {
+ EVT MemVT = LD->getMemoryVT();
+ if (!MemVT.isScalableVector() ||
+ Subtarget->getMaxSVEVectorSizeInBits() != AArch64::SVEBitsPerBlock)
+ return SDValue();
+
+ // Skip unpacked types given their different layouts between Neon and SVE.
+ if (MemVT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock)
+ return SDValue();
+
+ SDLoc DL(LD);
+ MVT NewVT = MVT::getVectorVT(MemVT.getVectorElementType().getSimpleVT(),
+ MemVT.getVectorMinNumElements());
+ SDValue NewLoad = DAG.getLoad(
+ NewVT, DL, LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
+ LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), LD->getAAInfo());
+ SDValue Insert = convertToScalableVector(DAG, MemVT, NewLoad);
+ SDValue TokenFactor = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+ {SDValue(cast<SDNode>(NewLoad), 1)});
+ return DAG.getMergeValues({Insert, TokenFactor}, DL);
+}
+
// Perform TBI simplification if supported by the target and try to break up
// nontemporal loads larger than 256-bits loads for odd types so LDNPQ 256-bit
// load instructions can be selected.
@@ -23587,6 +23612,9 @@ static SDValue performLOADCombine(SDNode *N,
if (SDValue Res = combineV3I8LoadExt(LD, DAG))
return Res;
+ if (SDValue Res = combineVScale1Load(LD, DAG, Subtarget))
+ return Res;
+
if (!LD->isNonTemporal())
return SDValue(N, 0);
@@ -23845,6 +23873,30 @@ static SDValue combineI8TruncStore(StoreSDNode *ST, SelectionDAG &DAG,
return Chain;
}
+// Replace scalable stores with fixed stores when vscale_range(1, 1).
+static SDValue combineVScale1Store(StoreSDNode *ST, SelectionDAG &DAG,
+ const AArch64Subtarget *Subtarget) {
+ SDValue Value = ST->getValue();
+ EVT ValueVT = Value.getValueType();
+ if (ST->isVolatile() || !Subtarget->isLittleEndian() ||
+ !ValueVT.isScalableVector() ||
+ Subtarget->getMaxSVEVectorSizeInBits() != AArch64::SVEBitsPerBlock)
+ return SDValue();
+
+ // Skip unpacked types given their different layouts between Neon and SVE.
+ if (ValueVT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock)
+ return SDValue();
+
+ SDLoc DL(ST);
+ MVT NewVT = MVT::getVectorVT(ValueVT.getVectorElementType().getSimpleVT(),
+ ValueVT.getVectorMinNumElements());
+ SDValue NewValue = convertFromScalableVector(DAG, NewVT, Value);
+ SDValue NewStore = DAG.getStore(
+ ST->getChain(), DL, NewValue, ST->getBasePtr(), ST->getPointerInfo(),
+ ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), ST->getAAInfo());
+ return NewStore;
+}
+
static SDValue performSTORECombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG,
@@ -23879,6 +23931,9 @@ static SDValue performSTORECombine(SDNode *N,
if (SDValue Res = combineI8TruncStore(ST, DAG, Subtarget))
return Res;
+ if (SDValue Res = combineVScale1Store(ST, DAG, Subtarget))
+ return Res;
+
// If this is an FP_ROUND followed by a store, fold this into a truncating
// store. We can do this even if this is already a truncstore.
// We purposefully don't care about legality of the nodes here as we know
diff --git a/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll b/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll
new file mode 100644
index 0000000000000..f2d4933d43259
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-unpred-loads-stores.ll
@@ -0,0 +1,483 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -aarch64-sve-vector-bits-max=0 < %s | FileCheck %s --check-prefix=CHECK-VLA
+; RUN: llc -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefix=CHECK-128
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define <vscale x 16 x i8> @ld_nxv16i8(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv16i8:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.b
+; CHECK-VLA-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv16i8:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr q0, [x0]
+; CHECK-128-NEXT: ret
+ %2 = load <vscale x 16 x i8>, ptr %0, align 16
+ ret <vscale x 16 x i8> %2
+}
+
+define void @st_nxv16i8(ptr %0, <vscale x 16 x i8> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv16i8:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.b
+; CHECK-VLA-NEXT: st1b { z0.b }, p0, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv16i8:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: str q0, [x0]
+; CHECK-128-NEXT: ret
+ store <vscale x 16 x i8> %1, ptr %0, align 16
+ ret void
+}
+
+define <vscale x 8 x i16> @ld_nxv8i16(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv8i16:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.h
+; CHECK-VLA-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv8i16:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr q0, [x0]
+; CHECK-128-NEXT: ret
+ %2 = load <vscale x 8 x i16>, ptr %0, align 16
+ ret <vscale x 8 x i16> %2
+}
+
+define void @st_nxv8i16(ptr %0, <vscale x 8 x i16> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv8i16:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.h
+; CHECK-VLA-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv8i16:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: str q0, [x0]
+; CHECK-128-NEXT: ret
+ store <vscale x 8 x i16> %1, ptr %0, align 16
+ ret void
+}
+
+define <vscale x 4 x i32> @ld_nxv4i32(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv4i32:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.s
+; CHECK-VLA-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv4i32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr q0, [x0]
+; CHECK-128-NEXT: ret
+ %2 = load <vscale x 4 x i32>, ptr %0, align 16
+ ret <vscale x 4 x i32> %2
+}
+
+define void @st_nxv4i32(ptr %0, <vscale x 4 x i32> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv4i32:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.s
+; CHECK-VLA-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv4i32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: str q0, [x0]
+; CHECK-128-NEXT: ret
+ store <vscale x 4 x i32> %1, ptr %0, align 16
+ ret void
+}
+
+define <vscale x 2 x i64> @ld_nxv2i64(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv2i64:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.d
+; CHECK-VLA-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv2i64:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr q0, [x0]
+; CHECK-128-NEXT: ret
+ %2 = load <vscale x 2 x i64>, ptr %0, align 16
+ ret <vscale x 2 x i64> %2
+}
+
+define void @st_nxv2i64(ptr %0, <vscale x 2 x i64> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv2i64:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.d
+; CHECK-VLA-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv2i64:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: str q0, [x0]
+; CHECK-128-NEXT: ret
+ store <vscale x 2 x i64> %1, ptr %0, align 16
+ ret void
+}
+
+define <vscale x 8 x half> @ld_nxv8f16(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv8f16:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.h
+; CHECK-VLA-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv8f16:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr q0, [x0]
+; CHECK-128-NEXT: ret
+ %2 = load <vscale x 8 x half>, ptr %0, align 16
+ ret <vscale x 8 x half> %2
+}
+
+define void @st_nxv8f16(ptr %0, <vscale x 8 x half> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv8f16:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.h
+; CHECK-VLA-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv8f16:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: str q0, [x0]
+; CHECK-128-NEXT: ret
+ store <vscale x 8 x half> %1, ptr %0, align 16
+ ret void
+}
+
+define <vscale x 4 x float> @ld_nxv4f32(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv4f32:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.s
+; CHECK-VLA-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv4f32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr q0, [x0]
+; CHECK-128-NEXT: ret
+ %2 = load <vscale x 4 x float>, ptr %0, align 16
+ ret <vscale x 4 x float> %2
+}
+
+define void @st_nxv4f32(ptr %0, <vscale x 4 x float> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv4f32:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.s
+; CHECK-VLA-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv4f32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: str q0, [x0]
+; CHECK-128-NEXT: ret
+ store <vscale x 4 x float> %1, ptr %0, align 16
+ ret void
+}
+
+define <vscale x 2 x double> @ld_nxv2f64(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv2f64:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.d
+; CHECK-VLA-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv2f64:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr q0, [x0]
+; CHECK-128-NEXT: ret
+ %2 = load <vscale x 2 x double>, ptr %0, align 16
+ ret <vscale x 2 x double> %2
+}
+
+define void @st_nxv2f64(ptr %0, <vscale x 2 x double> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv2f64:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.d
+; CHECK-VLA-NEXT: st1d { z0.d }, p0, [x0]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv2f64:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: str q0, [x0]
+; CHECK-128-NEXT: ret
+ store <vscale x 2 x double> %1, ptr %0, align 16
+ ret void
+}
+
+define <vscale x 16 x i8> @ld_nxv16i8_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv16i8_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.b
+; CHECK-VLA-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv16i8_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: ldr q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %2 = tail call i64 @llvm.vscale.i64()
+ %3 = shl nuw nsw i64 %2, 4
+ %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+ %5 = load <vscale x 16 x i8>, ptr %4, align 16
+ ret <vscale x 16 x i8> %5
+}
+
+define void @st_nxv16i8_offset(ptr %0, <vscale x 16 x i8> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv16i8_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.b
+; CHECK-VLA-NEXT: st1b { z0.b }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv16i8_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: str q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %3 = tail call i64 @llvm.vscale.i64()
+ %4 = shl nuw nsw i64 %3, 4
+ %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+ store <vscale x 16 x i8> %1, ptr %5, align 16
+ ret void
+}
+
+define <vscale x 8 x i16> @ld_nxv8i16_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv8i16_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.h
+; CHECK-VLA-NEXT: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv8i16_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: ldr q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %2 = tail call i64 @llvm.vscale.i64()
+ %3 = shl nuw nsw i64 %2, 4
+ %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+ %5 = load <vscale x 8 x i16>, ptr %4, align 16
+ ret <vscale x 8 x i16> %5
+}
+
+define void @st_nxv8i16_offset(ptr %0, <vscale x 8 x i16> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv8i16_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.h
+; CHECK-VLA-NEXT: st1h { z0.h }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv8i16_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: str q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %3 = tail call i64 @llvm.vscale.i64()
+ %4 = shl nuw nsw i64 %3, 4
+ %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+ store <vscale x 8 x i16> %1, ptr %5, align 16
+ ret void
+}
+
+define <vscale x 4 x i32> @ld_nxv4i32_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv4i32_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.s
+; CHECK-VLA-NEXT: ld1w { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv4i32_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: ldr q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %2 = tail call i64 @llvm.vscale.i64()
+ %3 = shl nuw nsw i64 %2, 4
+ %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+ %5 = load <vscale x 4 x i32>, ptr %4, align 16
+ ret <vscale x 4 x i32> %5
+}
+
+define void @st_nxv4i32_offset(ptr %0, <vscale x 4 x i32> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv4i32_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.s
+; CHECK-VLA-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv4i32_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: str q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %3 = tail call i64 @llvm.vscale.i64()
+ %4 = shl nuw nsw i64 %3, 4
+ %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+ store <vscale x 4 x i32> %1, ptr %5, align 16
+ ret void
+}
+
+define <vscale x 2 x i64> @ld_nxv2i64_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv2i64_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.d
+; CHECK-VLA-NEXT: ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv2i64_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: ldr q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %2 = tail call i64 @llvm.vscale.i64()
+ %3 = shl nuw nsw i64 %2, 4
+ %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+ %5 = load <vscale x 2 x i64>, ptr %4, align 16
+ ret <vscale x 2 x i64> %5
+}
+
+define void @st_nxv2i64_offset(ptr %0, <vscale x 2 x i64> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv2i64_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.d
+; CHECK-VLA-NEXT: st1d { z0.d }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv2i64_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: str q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %3 = tail call i64 @llvm.vscale.i64()
+ %4 = shl nuw nsw i64 %3, 4
+ %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+ store <vscale x 2 x i64> %1, ptr %5, align 16
+ ret void
+}
+
+define <vscale x 8 x half> @ld_nxv8f16_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv8f16_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.h
+; CHECK-VLA-NEXT: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv8f16_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: ldr q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %2 = tail call i64 @llvm.vscale.i64()
+ %3 = shl nuw nsw i64 %2, 4
+ %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+ %5 = load <vscale x 8 x half>, ptr %4, align 16
+ ret <vscale x 8 x half> %5
+}
+
+define void @st_nxv8f16_offset(ptr %0, <vscale x 8 x half> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv8f16_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.h
+; CHECK-VLA-NEXT: st1h { z0.h }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv8f16_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: str q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %3 = tail call i64 @llvm.vscale.i64()
+ %4 = shl nuw nsw i64 %3, 4
+ %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+ store <vscale x 8 x half> %1, ptr %5, align 16
+ ret void
+}
+
+define <vscale x 4 x float> @ld_nxv4f32_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv4f32_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.s
+; CHECK-VLA-NEXT: ld1w { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv4f32_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: ldr q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %2 = tail call i64 @llvm.vscale.i64()
+ %3 = shl nuw nsw i64 %2, 4
+ %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+ %5 = load <vscale x 4 x float>, ptr %4, align 16
+ ret <vscale x 4 x float> %5
+}
+
+define void @st_nxv4f32_offset(ptr %0, <vscale x 4 x float> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv4f32_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.s
+; CHECK-VLA-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv4f32_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: str q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %3 = tail call i64 @llvm.vscale.i64()
+ %4 = shl nuw nsw i64 %3, 4
+ %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+ store <vscale x 4 x float> %1, ptr %5, align 16
+ ret void
+}
+
+define <vscale x 2 x double> @ld_nxv2f64_offset(ptr %0) #0 {
+; CHECK-VLA-LABEL: ld_nxv2f64_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.d
+; CHECK-VLA-NEXT: ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: ld_nxv2f64_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: ldr q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %2 = tail call i64 @llvm.vscale.i64()
+ %3 = shl nuw nsw i64 %2, 4
+ %4 = getelementptr inbounds nuw i8, ptr %0, i64 %3
+ %5 = load <vscale x 2 x double>, ptr %4, align 16
+ ret <vscale x 2 x double> %5
+}
+
+define void @st_nxv2f64_offset(ptr %0, <vscale x 2 x double> %1) #0 {
+; CHECK-VLA-LABEL: st_nxv2f64_offset:
+; CHECK-VLA: // %bb.0:
+; CHECK-VLA-NEXT: ptrue p0.d
+; CHECK-VLA-NEXT: st1d { z0.d }, p0, [x0, #1, mul vl]
+; CHECK-VLA-NEXT: ret
+;
+; CHECK-128-LABEL: st_nxv2f64_offset:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: rdvl x8, #1
+; CHECK-128-NEXT: str q0, [x0, x8]
+; CHECK-128-NEXT: ret
+ %3 = tail call i64 @llvm.vscale.i64()
+ %4 = shl nuw nsw i64 %3, 4
+ %5 = getelementptr inbounds nuw i8, ptr %0, i64 %4
+ store <vscale x 2 x double> %1, ptr %5, align 16
+ ret void
+}
+
+attributes #0 = { "target-features"="+sve" }
``````````
https://github.com/llvm/llvm-project/pull/127500