[llvm] c80080f - [AArch64][SVE] Pair SVE fill/spill into LDP/STP with -msve-vector-bits=128. (#134068)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 9 04:19:20 PDT 2025
Author: Ricardo Jesus
Date: 2025-04-09T12:19:17+01:00
New Revision: c80080ff7e105eb42d486ed473fa9c82fb518b0a
URL: https://github.com/llvm/llvm-project/commit/c80080ff7e105eb42d486ed473fa9c82fb518b0a
DIFF: https://github.com/llvm/llvm-project/commit/c80080ff7e105eb42d486ed473fa9c82fb518b0a.diff
LOG: [AArch64][SVE] Pair SVE fill/spill into LDP/STP with -msve-vector-bits=128. (#134068)
When compiling with -msve-vector-bits=128, or with the vscale_range(1, 1) IR
attribute, and when the offsets allow it, we can pair SVE fill/spill (LDR/STR)
instructions into Neon LDP/STP.
For example, given:
```cpp
#include <arm_sve.h>
void foo(double const *ldp, double *stp) {
  svbool_t pg = svptrue_b64();
  svfloat64_t ld1 = svld1_f64(pg, ldp);
  svfloat64_t ld2 = svld1_f64(pg, ldp+svcntd());
  svst1_f64(pg, stp, ld1);
  svst1_f64(pg, stp+svcntd(), ld2);
}
```
When compiled with `-msve-vector-bits=128`, we currently generate:
```gas
foo:
        ldr z0, [x0]
        ldr z1, [x0, #1, mul vl]
        str z0, [x1]
        str z1, [x1, #1, mul vl]
        ret
```
With this patch, we instead generate:
```gas
foo:
        ldp q0, q1, [x0]
        stp q0, q1, [x1]
        ret
```
This is an alternative, more targeted approach to #127500.
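For reference, the same pairing also applies when the vector length is pinned
at the IR level via the vscale_range attribute rather than the clang flag. The
following is a minimal illustrative sketch (the function name and body are not
taken from the patch or its tests):
```llvm
; Illustrative only: vscale_range(1,1) fixes vscale to 1, i.e. a 128-bit SVE
; vector length, so the two contiguous fills/spills below become eligible for
; pairing into q-form ldp/stp (requires a little-endian target with +sve).
declare i64 @llvm.vscale.i64()

define void @pair_example(ptr %ldptr, ptr %stptr) vscale_range(1,1) {
  %vscale = call i64 @llvm.vscale.i64()
  %vl = shl nuw nsw i64 %vscale, 4
  %ldptr2 = getelementptr inbounds i8, ptr %ldptr, i64 %vl
  %stptr2 = getelementptr inbounds i8, ptr %stptr, i64 %vl
  %ld1 = load <vscale x 2 x double>, ptr %ldptr, align 16
  %ld2 = load <vscale x 2 x double>, ptr %ldptr2, align 16
  store <vscale x 2 x double> %ld1, ptr %stptr, align 16
  store <vscale x 2 x double> %ld2, ptr %stptr2, align 16
  ret void
}
```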
Added:
llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
Modified:
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index d370f8c7ff6ea..74217fad82a7e 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2760,6 +2760,9 @@ bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {
case AArch64::LDRXpre:
case AArch64::LDURSWi:
case AArch64::LDRSWpre:
+ // SVE instructions.
+ case AArch64::LDR_ZXI:
+ case AArch64::STR_ZXI:
return true;
}
}
@@ -2912,6 +2915,18 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
return false;
}
+ // Pairing SVE fills/spills is only valid for little-endian targets that
+ // implement VLS 128.
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case AArch64::LDR_ZXI:
+ case AArch64::STR_ZXI:
+ if (!Subtarget.isLittleEndian() ||
+ Subtarget.getSVEVectorSizeInBits() != 128)
+ return false;
+ }
+
// Check if this load/store has a hint to avoid pair formation.
// MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
if (isLdStPairSuppressed(MI))
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 06e633effe874..68ed68b9e32bc 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -298,6 +298,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
case AArch64::STRXui:
case AArch64::STRXpre:
case AArch64::STURXi:
+ case AArch64::STR_ZXI:
case AArch64::LDRDui:
case AArch64::LDURDi:
case AArch64::LDRDpre:
@@ -316,6 +317,7 @@ static unsigned getMatchingNonSExtOpcode(unsigned Opc,
case AArch64::LDRSui:
case AArch64::LDURSi:
case AArch64::LDRSpre:
+ case AArch64::LDR_ZXI:
return Opc;
case AArch64::LDRSWui:
return AArch64::LDRWui;
@@ -361,6 +363,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
return AArch64::STPDpre;
case AArch64::STRQui:
case AArch64::STURQi:
+ case AArch64::STR_ZXI:
return AArch64::STPQi;
case AArch64::STRQpre:
return AArch64::STPQpre;
@@ -386,6 +389,7 @@ static unsigned getMatchingPairOpcode(unsigned Opc) {
return AArch64::LDPDpre;
case AArch64::LDRQui:
case AArch64::LDURQi:
+ case AArch64::LDR_ZXI:
return AArch64::LDPQi;
case AArch64::LDRQpre:
return AArch64::LDPQpre;
@@ -1225,6 +1229,16 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
(void)MIBSXTW;
LLVM_DEBUG(dbgs() << " Extend operand:\n ");
LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
+ } else if (Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI) {
+ // We are combining SVE fill/spill to LDP/STP, so we need to use the Q
+ // variant of the registers.
+ MachineOperand &MOp0 = MIB->getOperand(0);
+ MachineOperand &MOp1 = MIB->getOperand(1);
+ assert(AArch64::ZPRRegClass.contains(MOp0.getReg()) &&
+ AArch64::ZPRRegClass.contains(MOp1.getReg()) && "Invalid register.");
+ MOp0.setReg(AArch64::Q0 + (MOp0.getReg() - AArch64::Z0));
+ MOp1.setReg(AArch64::Q0 + (MOp1.getReg() - AArch64::Z0));
+ LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
} else {
LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
}
@@ -2659,7 +2673,8 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
// Get the needed alignments to check them if
// ldp-aligned-only/stp-aligned-only features are opted.
uint64_t MemAlignment = MemOp->getAlign().value();
- uint64_t TypeAlignment = Align(MemOp->getSize().getValue()).value();
+ uint64_t TypeAlignment =
+ Align(MemOp->getSize().getValue().getKnownMinValue()).value();
if (MemAlignment < 2 * TypeAlignment) {
NumFailedAlignmentCheck++;
@@ -2820,11 +2835,18 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
}
// 3) Find loads and stores that can be merged into a single load or store
// pair instruction.
+ // When compiling for SVE 128, also try to combine SVE fill/spill
+ // instructions into LDP/STP.
// e.g.,
// ldr x0, [x2]
// ldr x1, [x2, #8]
// ; becomes
// ldp x0, x1, [x2]
+ // e.g.,
+ // ldr z0, [x2]
+ // ldr z1, [x2, #1, mul vl]
+ // ; becomes
+ // ldp q0, q1, [x2]
if (MBB.getParent()->getRegInfo().tracksLiveness()) {
DefinedInBB.clear();
diff --git a/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
new file mode 100644
index 0000000000000..503ead4eba2db
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-sve-fill-spill-pair.ll
@@ -0,0 +1,283 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=aarch64_be-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-BE
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,ldp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-LDPALIGNEDONLY
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF
+
+define void @nxv16i8(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: stp q0, q1, [x1]
+; CHECK-NEXT: ret
+;
+; CHECK-BE-LABEL: nxv16i8:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: ptrue p0.b
+; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, #1, mul vl]
+; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, #1, mul vl]
+; CHECK-BE-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0]
+; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1]
+; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %vl = shl nuw nsw i64 %vscale, 4
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
+ %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
+ %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+ store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
+ store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+ ret void
+}
+
+define void @nxv16i8_max_range(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8_max_range:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0, #-1024]
+; CHECK-NEXT: stp q0, q1, [x1, #1008]
+; CHECK-NEXT: ret
+;
+; CHECK-BE-LABEL: nxv16i8_max_range:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: rdvl x8, #1
+; CHECK-BE-NEXT: mov x9, #-1008 // =0xfffffffffffffc10
+; CHECK-BE-NEXT: mov x10, #-1024 // =0xfffffffffffffc00
+; CHECK-BE-NEXT: lsr x8, x8, #4
+; CHECK-BE-NEXT: mov w11, #1008 // =0x3f0
+; CHECK-BE-NEXT: mov w12, #1024 // =0x400
+; CHECK-BE-NEXT: ptrue p0.b
+; CHECK-BE-NEXT: mul x9, x8, x9
+; CHECK-BE-NEXT: mul x10, x8, x10
+; CHECK-BE-NEXT: mul x11, x8, x11
+; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, x9]
+; CHECK-BE-NEXT: mul x8, x8, x12
+; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0, x10]
+; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1, x11]
+; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, x8]
+; CHECK-BE-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_max_range:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1, #1008]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_max_range:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0, #-1024]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #63, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8_max_range:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0, #-64, mul vl]
+; CHECK-OFF-NEXT: ldr z1, [x0, #-63, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1, #63, mul vl]
+; CHECK-OFF-NEXT: str z1, [x1, #64, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %ldoff1 = mul i64 %vscale, -1024
+ %ldoff2 = mul i64 %vscale, -1008
+ %stoff1 = mul i64 %vscale, 1008
+ %stoff2 = mul i64 %vscale, 1024
+ %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
+ %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
+ %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
+ %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+ store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
+ store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+ ret void
+}
+
+define void @nxv16i8_outside_range(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8_outside_range:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-BE-LABEL: nxv16i8_outside_range:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: rdvl x8, #1
+; CHECK-BE-NEXT: mov x9, #-1040 // =0xfffffffffffffbf0
+; CHECK-BE-NEXT: mov x10, #-1024 // =0xfffffffffffffc00
+; CHECK-BE-NEXT: lsr x8, x8, #4
+; CHECK-BE-NEXT: mov w11, #1024 // =0x400
+; CHECK-BE-NEXT: mov w12, #1040 // =0x410
+; CHECK-BE-NEXT: ptrue p0.b
+; CHECK-BE-NEXT: mul x9, x8, x9
+; CHECK-BE-NEXT: mul x10, x8, x10
+; CHECK-BE-NEXT: mul x11, x8, x11
+; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0, x9]
+; CHECK-BE-NEXT: mul x8, x8, x12
+; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, x10]
+; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1, x11]
+; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, x8]
+; CHECK-BE-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_outside_range:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_outside_range:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8_outside_range:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0, #-65, mul vl]
+; CHECK-OFF-NEXT: ldr z1, [x0, #-64, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1, #64, mul vl]
+; CHECK-OFF-NEXT: str z1, [x1, #65, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %ldoff1 = mul i64 %vscale, -1040
+ %ldoff2 = mul i64 %vscale, -1024
+ %stoff1 = mul i64 %vscale, 1024
+ %stoff2 = mul i64 %vscale, 1040
+ %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
+ %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
+ %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
+ %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+ store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
+ store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+ ret void
+}
+
+define void @nxv16i8_2vl_stride(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8_2vl_stride:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-BE-LABEL: nxv16i8_2vl_stride:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: ptrue p0.b
+; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, #2, mul vl]
+; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, #2, mul vl]
+; CHECK-BE-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
+; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv16i8_2vl_stride:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0]
+; CHECK-OFF-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1]
+; CHECK-OFF-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %vl = shl nuw nsw i64 %vscale, 5
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
+ %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
+ %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
+ store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
+ store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
+ ret void
+}
+
+define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv2f64_32b_aligned:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0]
+; CHECK-NEXT: stp q0, q1, [x1]
+; CHECK-NEXT: ret
+;
+; CHECK-BE-LABEL: nxv2f64_32b_aligned:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: ptrue p0.d
+; CHECK-BE-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-BE-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-BE-NEXT: st1d { z0.d }, p0, [x1]
+; CHECK-BE-NEXT: st1d { z1.d }, p0, [x1, #1, mul vl]
+; CHECK-BE-NEXT: ret
+;
+; CHECK-LDPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
+; CHECK-LDPALIGNEDONLY: // %bb.0:
+; CHECK-LDPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
+; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
+; CHECK-LDPALIGNEDONLY-NEXT: ret
+;
+; CHECK-STPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
+; CHECK-STPALIGNEDONLY: // %bb.0:
+; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
+; CHECK-STPALIGNEDONLY-NEXT: stp q0, q1, [x1]
+; CHECK-STPALIGNEDONLY-NEXT: ret
+;
+; CHECK-OFF-LABEL: nxv2f64_32b_aligned:
+; CHECK-OFF: // %bb.0:
+; CHECK-OFF-NEXT: ldr z0, [x0]
+; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-OFF-NEXT: str z0, [x1]
+; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-OFF-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale()
+ %vl = shl nuw nsw i64 %vscale, 4
+ %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
+ %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
+ %ld1 = load <vscale x 2 x double>, ptr %ldptr, align 32
+ %ld2 = load <vscale x 2 x double>, ptr %ldptr2, align 32
+ store <vscale x 2 x double> %ld1, ptr %stptr, align 32
+ store <vscale x 2 x double> %ld2, ptr %stptr2, align 32
+ ret void
+}