[llvm] [AArch64][SVE] Pair SVE fill/spill into LDP/STP with -msve-vector-bits=128. (PR #134068)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 2 06:46:29 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-aarch64
Author: Ricardo Jesus (rj-jesus)
Changes:
When compiling with -msve-vector-bits=128 or vscale_range(1, 1) and when
the offsets allow it, we can pair SVE LDR/STR instructions into Neon
LDP/STP.
For example, given (https://godbolt.org/z/srjxYbK9s):
```cpp
#include <arm_sve.h>
void foo(double const *ldp, double *stp) {
  svbool_t pg = svptrue_b64();
  svfloat64_t ld1 = svld1_f64(pg, ldp);
  svfloat64_t ld2 = svld1_f64(pg, ldp + svcntd());
  svst1_f64(pg, stp, ld1);
  svst1_f64(pg, stp + svcntd(), ld2);
}
```
When compiled with `-msve-vector-bits=128`, we currently generate:
```gas
foo:
        ldr     z0, [x0]
        ldr     z1, [x0, #1, mul vl]
        str     z0, [x1]
        str     z1, [x1, #1, mul vl]
        ret
```
With this patch, we instead generate:
```gas
foo:
        ldp     q0, q1, [x0]
        stp     q0, q1, [x1]
        ret
```
Loading (and, to a lesser extent, storing) multiple registers from a
common base address is a frequently occurring pattern, but multi-register
SVE load/store instructions are only available from SVE2.1 onwards. This
patch offers an alternative for 128-bit SVE implementations.
This is a more targeted alternative to #127500.
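For context on "when the offsets allow it": with the vector length fixed at
128 bits, an SVE fill/spill offset of `#i, mul vl` is a plain byte offset of
16*i, which must land in the signed, 16-byte-scaled 7-bit immediate range of
LDP/STP of Q registers (byte offsets -1024 to 1008). A small compile-time
sketch of that arithmetic (not part of the patch; the helper name is made
up), which the `nxv16i8_max_range`/`nxv16i8_outside_range` tests below
exercise:
```cpp
// Sketch only: at VL == 128, an SVE fill/spill immediate "#i, mul vl"
// denotes a byte offset of i * 16 -- the same 16-byte granule as LDP/STP
// of Q registers, whose signed 7-bit scaled immediate spans [-1024, 1008].
constexpr int sveFillSpillOffsetBytes(int ImmMulVL, int VLBytes = 16) {
  return ImmMulVL * VLBytes;
}
static_assert(sveFillSpillOffsetBytes(1) == 16);      // pairs with offset 0
static_assert(sveFillSpillOffsetBytes(-64) == -1024); // lowest LDP/STP Q offset
static_assert(sveFillSpillOffsetBytes(63) == 1008);   // highest LDP/STP Q offset
static_assert(sveFillSpillOffsetBytes(-65) < -1024);  // out of range; not paired
```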
---
Full diff: https://github.com/llvm/llvm-project/pull/134068.diff
2 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp (+47-1)
- (added) llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll (+218)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index cd976790ebb6f..f1f1f66e12216 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -87,6 +87,10 @@ static cl::opt<unsigned> LdStConstLimit("aarch64-load-store-const-scan-limit",
static cl::opt<bool> EnableRenaming("aarch64-load-store-renaming",
cl::init(true), cl::Hidden);
+// Enable SVE fill/spill pairing for VLS 128.
+static cl::opt<bool> EnableSVEFillSpillPairing("aarch64-sve-fill-spill-pairing",
+ cl::init(true), cl::Hidden);
+
#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"
namespace {
@@ -97,6 +101,9 @@ using LdStPairFlags = struct LdStPairFlags {
// a pair-wise insn, and false if the reverse is true.
bool MergeForward = false;
+ // Set to true when pairing SVE fill/spill instructions.
+ bool SVEFillSpillPair = false;
+
// SExtIdx gives the index of the result of the load pair that must be
// extended. The value of SExtIdx assumes that the paired load produces the
// value in this order: (I, returned iterator), i.e., -1 means no value has
@@ -113,6 +120,9 @@ using LdStPairFlags = struct LdStPairFlags {
void setMergeForward(bool V = true) { MergeForward = V; }
bool getMergeForward() const { return MergeForward; }
+ void setSVEFillSpillPair(bool V = true) { SVEFillSpillPair = V; }
+ bool getSVEFillSpillPair() const { return SVEFillSpillPair; }
+
void setSExtIdx(int V) { SExtIdx = V; }
int getSExtIdx() const { return SExtIdx; }
@@ -300,6 +310,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
case AArch64::STRXui:
case AArch64::STRXpre:
case AArch64::STURXi:
+ case AArch64::STR_ZXI:
case AArch64::LDRDui:
case AArch64::LDURDi:
case AArch64::LDRDpre:
@@ -318,6 +329,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
case AArch64::LDRSui:
case AArch64::LDURSi:
case AArch64::LDRSpre:
+ case AArch64::LDR_ZXI:
return Opc;
case AArch64::LDRSWui:
return AArch64::LDRWui;
@@ -363,6 +375,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
return AArch64::STPDpre;
case AArch64::STRQui:
case AArch64::STURQi:
+ case AArch64::STR_ZXI:
return AArch64::STPQi;
case AArch64::STRQpre:
return AArch64::STPQpre;
@@ -388,6 +401,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
return AArch64::LDPDpre;
case AArch64::LDRQui:
case AArch64::LDURQi:
+ case AArch64::LDR_ZXI:
return AArch64::LDPQi;
case AArch64::LDRQpre:
return AArch64::LDPQpre;
@@ -833,6 +847,12 @@ static bool isMergeableIndexLdSt(MachineInstr &MI, int &Scale) {
}
}
+// Return true if MI is an SVE fill/spill instruction.
+static bool isPairableFillSpillInst(const MachineInstr &MI) {
+ auto const Opc = MI.getOpcode();
+ return Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI;
+}
+
static bool isRewritableImplicitDef(unsigned Opc) {
switch (Opc) {
default:
@@ -1227,6 +1247,15 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
(void)MIBSXTW;
LLVM_DEBUG(dbgs() << " Extend operand:\n ");
LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
+ } else if (Flags.getSVEFillSpillPair()) {
+ // We are combining SVE fill/spill to LDP/STP, so we need to get the Q
+ // variant of the registers.
+ MachineOperand &MOp0 = MIB->getOperand(0);
+ MachineOperand &MOp1 = MIB->getOperand(1);
+ assert(AArch64::ZPRRegClass.contains(MOp0.getReg()) &&
+ AArch64::ZPRRegClass.contains(MOp1.getReg()) && "Invalid register.");
+ MOp0.setReg(AArch64::Q0 + (MOp0.getReg() - AArch64::Z0));
+ MOp1.setReg(AArch64::Q0 + (MOp1.getReg() - AArch64::Z0));
} else {
LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
}
@@ -1829,6 +1858,9 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
Flags.clearRenameReg();
+ if (isPairableFillSpillInst(FirstMI))
+ Flags.setSVEFillSpillPair();
+
// Track which register units have been modified and used between the first
// insn (inclusive) and the second insn.
ModifiedRegUnits.clear();
@@ -2661,7 +2693,8 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
// Get the needed alignments to check them if
// ldp-aligned-only/stp-aligned-only features are opted.
uint64_t MemAlignment = MemOp->getAlign().value();
- uint64_t TypeAlignment = Align(MemOp->getSize().getValue()).value();
+ uint64_t TypeAlignment =
+ Align(MemOp->getSize().getValue().getKnownMinValue()).value();
if (MemAlignment < 2 * TypeAlignment) {
NumFailedAlignmentCheck++;
@@ -2782,6 +2815,9 @@ bool AArch64LoadStoreOpt::tryToMergeIndexLdSt(MachineBasicBlock::iterator &MBBI,
bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
bool EnableNarrowZeroStOpt) {
AArch64FunctionInfo &AFI = *MBB.getParent()->getInfo<AArch64FunctionInfo>();
+ bool const CanPairFillSpill = EnableSVEFillSpillPairing &&
+ Subtarget->isSVEorStreamingSVEAvailable() &&
+ Subtarget->getSVEVectorSizeInBits() == 128;
bool Modified = false;
// Four tranformations to do here:
@@ -2822,11 +2858,18 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
}
// 3) Find loads and stores that can be merged into a single load or store
// pair instruction.
+ // When compiling for SVE 128, also try to combine SVE fill/spill
+ // instructions into LDP/STP.
// e.g.,
// ldr x0, [x2]
// ldr x1, [x2, #8]
// ; becomes
// ldp x0, x1, [x2]
+ // e.g.,
+ // ldr z0, [x2]
+ // ldr z1, [x2, #1, mul vl]
+ // ; becomes
+ // ldp q0, q1, [x2]
if (MBB.getParent()->getRegInfo().tracksLiveness()) {
DefinedInBB.clear();
@@ -2840,6 +2883,9 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
updateDefinedRegisters(*MBBI, DefinedInBB, TRI);
if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
Modified = true;
+ else if (CanPairFillSpill && isPairableFillSpillInst(*MBBI) &&
+ tryToPairLdStInst(MBBI))
+ Modified = true;
else
++MBBI;
}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
new file mode 100644
index 0000000000000..79120bc5352aa
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
@@ -0,0 +1,218 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,ldp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-LDPALIGNEDONLY
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 -aarch64-sve-fill-spill-pairing=0 < %s | FileCheck %s --check-prefixes=CHECK-OFF
+
+define void @nxv16i8(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: stp q0, q1, [x1]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0]
+; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1]
+; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %vl = shl nuw nsw i64 %vscale, 4
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
+ %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
+ %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+ store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
+ store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+ ret void
+}
+
+define void @nxv16i8_max_range(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8_max_range:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0, #-1024]
+; CHECK-NEXT: stp q0, q1, [x1, #1008]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_max_range:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1, #1008]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_max_range:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0, #-1024]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #63, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8_max_range:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0, #-64, mul vl]
+; CHECK-OFF-NEXT: ldr z1, [x0, #-63, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1, #63, mul vl]
+; CHECK-OFF-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %ldoff1 = mul i64 %vscale, -1024
+ %ldoff2 = mul i64 %vscale, -1008
+ %stoff1 = mul i64 %vscale, 1008
+ %stoff2 = mul i64 %vscale, 1024
+ %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
+ %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
+ %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
+ %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+ store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
+ store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+ ret void
+}
+
+define void @nxv16i8_outside_range(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8_outside_range:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_outside_range:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_outside_range:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8_outside_range:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-OFF-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-OFF-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %ldoff1 = mul i64 %vscale, -1040
+ %ldoff2 = mul i64 %vscale, -1024
+ %stoff1 = mul i64 %vscale, 1024
+ %stoff2 = mul i64 %vscale, 1040
+ %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
+ %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
+ %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
+ %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+ store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
+ store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+ ret void
+}
+
+define void @nxv16i8_2vl_stride(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8_2vl_stride:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8_2vl_stride:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0]
+; CHECK-OFF-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1]
+; CHECK-OFF-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %vl = shl nuw nsw i64 %vscale, 5
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
+ %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
+ %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+ store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
+ store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+ ret void
+}
+
+define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv2f64_32b_aligned:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: stp q0, q1, [x1]
+; CHECK-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: stp q0, q1, [x1]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv2f64_32b_aligned:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0]
+; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1]
+; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %vl = shl nuw nsw i64 %vscale, 4
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
+ %ld1 = load <vscale x 2 x double>, ptr %ldptr, align 32
+ %ld2 = load <vscale x 2 x double>, ptr %ldptr2, align 32
+ store <vscale x 2 x double> %ld1, ptr %stptr, align 32
+ store <vscale x 2 x double> %ld2, ptr %stptr2, align 32
+ ret void
+}
``````````
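A note on the `mergePairedInsns` hunk above: the operand rewrite from Z to Q
registers works because Qn is the low 128 bits of Zn and both register banks
occupy contiguous runs in the TableGen-erated register enum. A standalone
sketch of that mapping (hypothetical helper; assumes the AArch64 backend
headers for the register enums):
```cpp
#include <cassert>

// Hypothetical helper mirroring the rewrite in mergePairedInsns: recover the
// Q register that overlaps a given Z register by index arithmetic. Relies on
// AArch64::Z0..Z31 and AArch64::Q0..Q31 each being contiguous in the
// generated register enum -- the same assumption the patch itself makes.
static unsigned zRegToQReg(unsigned ZReg) {
  assert(ZReg >= AArch64::Z0 && ZReg <= AArch64::Z31 && "expected a Z register");
  return AArch64::Q0 + (ZReg - AArch64::Z0);
}
```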
https://github.com/llvm/llvm-project/pull/134068