[llvm] [AArch64] Fix using NEON copies in streaming-mode-enable regions. (PR #174738)
Amara Emerson via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 9 02:13:41 PST 2026
https://github.com/aemerson updated https://github.com/llvm/llvm-project/pull/174738
>From 6039cc6b35ce8b42750d60d65796fb2a97142485 Mon Sep 17 00:00:00 2001
From: Amara Emerson <amara at apple.com>
Date: Tue, 6 Jan 2026 23:22:46 -0800
Subject: [PATCH 1/5] [AArch64] Fix using NEON copies in streaming-mode-enable
regions.
The current check for whether we're allowed to use a NEON copy is based on
the function attributes, which works most of the time. However, in one
particular case where a normal function calls a streaming one, there's a
window where we enable SM at the call site and then emit a copy for an
outgoing parameter. This copy was lowered to a NEON move, which is illegal
in streaming mode.
There's also another case where we could end up generating these, related
to the zero-cycle move tuning features.
Both of these cases are fixed in this patch by walking back from the copy
to look for any streaming mode changes (within the current block). I know
this is pretty ugly, but I don't have a better solution right now.
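To illustrate the first case (a sketch assembled from the test added below;
the "before" sequence is a hypothetical reconstruction of the bad codegen,
not captured output, and the exact registers depend on the calling
convention):

  smstart sm
  mov.16b v3, v0        ; NEON copy of an outgoing argument -- illegal in SM
  bl _streaming_callee

After this patch the copy is emitted as a streaming-legal SVE move instead:

  smstart sm
  mov z3.d, z0.d        ; ORR_ZZZ copy, legal in streaming mode
  bl _streaming_callee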
rdar://167439642
---
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 44 ++-
.../AArch64/sme-streaming-mode-fpr-copy.ll | 312 ++++++++++++++++++
2 files changed, 350 insertions(+), 6 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/sme-streaming-mode-fpr-copy.ll
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 7e1dd8f16b337..9e9ef730b869c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5389,6 +5389,31 @@ void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB,
}
}
+/// Returns true if the instruction at I is in a streaming call site region,
+/// within a single basic block.
+/// A "call site streaming region" starts after smstart and ends at smstop
+/// around a call to a streaming function. This walks backward from I.
+static bool isInStreamingCallSiteRegion(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) {
+ MachineFunction &MF = *MBB.getParent();
+ AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ if (!AFI->hasStreamingModeChanges())
+ return false;
+ // Walk backwards to find smstart/smstop
+ for (MachineInstr &MI : reverse(make_range(MBB.begin(), I))) {
+ unsigned Opc = MI.getOpcode();
+ if (Opc == AArch64::MSRpstatesvcrImm1 || Opc == AArch64::MSRpstatePseudo) {
+ // Check if this is an SM change (not ZA).
+ int64_t PState = MI.getOperand(0).getImm();
+ if (PState == AArch64SVCR::SVCRSM || PState == AArch64SVCR::SVCRSMZA) {
+ // Operand 1 is 1 for start, 0 for stop
+ return MI.getOperand(1).getImm() == 1;
+ }
+ }
+ }
+ return false;
+}
+
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, Register DestReg,
@@ -5674,8 +5699,11 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (AArch64::FPR128RegClass.contains(DestReg) &&
AArch64::FPR128RegClass.contains(SrcReg)) {
- if (Subtarget.isSVEorStreamingSVEAvailable() &&
- !Subtarget.isNeonAvailable()) {
+ // In streaming regions, NEON is illegal but streaming-SVE is available.
+ // Use SVE for copies if we're in a streaming region and SME is available.
+ if ((Subtarget.isSVEorStreamingSVEAvailable() &&
+ !Subtarget.isNeonAvailable()) ||
+ isInStreamingCallSiteRegion(MBB, I)) {
BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ))
.addReg(AArch64::Z0 + (DestReg - AArch64::Q0), RegState::Define)
.addReg(AArch64::Z0 + (SrcReg - AArch64::Q0))
@@ -5705,7 +5733,8 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
AArch64::FPR64RegClass.contains(SrcReg)) {
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
- !Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable()) {
+ !Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable() &&
+ !isInStreamingCallSiteRegion(MBB, I)) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
@@ -5732,7 +5761,8 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
AArch64::FPR32RegClass.contains(SrcReg)) {
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
- !Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable()) {
+ !Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable() &&
+ !isInStreamingCallSiteRegion(MBB, I)) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
@@ -5773,7 +5803,8 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
AArch64::FPR16RegClass.contains(SrcReg)) {
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
- !Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable()) {
+ !Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable() &&
+ !isInStreamingCallSiteRegion(MBB, I)) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
@@ -5814,7 +5845,8 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
AArch64::FPR8RegClass.contains(SrcReg)) {
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
- !Subtarget.hasZeroCycleRegMoveFPR64() && Subtarget.isNeonAvailable()) {
+ !Subtarget.hasZeroCycleRegMoveFPR64() && Subtarget.isNeonAvailable() &&
+ !isInStreamingCallSiteRegion(MBB, I)) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-mode-fpr-copy.ll b/llvm/test/CodeGen/AArch64/sme-streaming-mode-fpr-copy.ll
new file mode 100644
index 0000000000000..bf026d9ef8642
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-mode-fpr-copy.ll
@@ -0,0 +1,312 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=arm64-apple-macosx -mcpu=apple-m4 < %s | FileCheck %s
+
+; Test that FPR copies in functions with streaming mode changes
+; use SVE/scalar instructions instead of NEON to avoid illegal instructions
+; in streaming regions. For 32/64b FPR cases Apple zero cycle moves can also
+; trigger this issue.
+
+declare void @streaming_callee(ptr, ptr, <2 x double>, <2 x double>, <2 x double>, <2 x double>) "aarch64_pstate_sm_enabled"
+declare void @normal_callee(ptr, ptr, <2 x double>, <2 x double>, <2 x double>, <2 x double>)
+
+define void @caller(ptr %X, ptr %Y, <2 x double> %C, <2 x double> %S) "target-features"="+sme2" {
+; CHECK-LABEL: caller:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: sub sp, sp, #128
+; CHECK-NEXT: .cfi_def_cfa_offset 128
+; CHECK-NEXT: stp d15, d14, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #64] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #80] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #96] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #112] ; 16-byte Folded Spill
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_offset b8, -24
+; CHECK-NEXT: .cfi_offset b9, -32
+; CHECK-NEXT: .cfi_offset b10, -40
+; CHECK-NEXT: .cfi_offset b11, -48
+; CHECK-NEXT: .cfi_offset b12, -56
+; CHECK-NEXT: .cfi_offset b13, -64
+; CHECK-NEXT: .cfi_offset b14, -72
+; CHECK-NEXT: .cfi_offset b15, -80
+; CHECK-NEXT: stp q1, q0, [sp, #16] ; 32-byte Folded Spill
+; CHECK-NEXT: fneg.2d v0, v1
+; CHECK-NEXT: str q0, [sp] ; 16-byte Spill
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: ldp q2, q0, [sp, #16] ; 32-byte Folded Reload
+; CHECK-NEXT: ldr q1, [sp] ; 16-byte Reload
+; CHECK-NEXT: mov z3.d, z0.d
+; CHECK-NEXT: bl _streaming_callee
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: ldp x29, x30, [sp, #112] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #96] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #80] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #64] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #48] ; 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #128
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: .cfi_restore w29
+; CHECK-NEXT: .cfi_restore b8
+; CHECK-NEXT: .cfi_restore b9
+; CHECK-NEXT: .cfi_restore b10
+; CHECK-NEXT: .cfi_restore b11
+; CHECK-NEXT: .cfi_restore b12
+; CHECK-NEXT: .cfi_restore b13
+; CHECK-NEXT: .cfi_restore b14
+; CHECK-NEXT: .cfi_restore b15
+; CHECK-NEXT: ret
+entry:
+ %negS = fneg <2 x double> %S
+ call void @streaming_callee(ptr %X, ptr %Y, <2 x double> %C, <2 x double> %negS, <2 x double> %S, <2 x double> %C)
+ ret void
+}
+
+declare void @streaming_callee_d(i64, i64, double, double, double, double) "aarch64_pstate_sm_enabled"
+
+define void @fpr64_copy(i64 %n, i64 %m, double %c, double %s) "target-features"="+sme2" {
+; CHECK-LABEL: fpr64_copy:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: sub sp, sp, #112
+; CHECK-NEXT: .cfi_def_cfa_offset 112
+; CHECK-NEXT: stp d15, d14, [sp, #32] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #64] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #80] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #96] ; 16-byte Folded Spill
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_offset b8, -24
+; CHECK-NEXT: .cfi_offset b9, -32
+; CHECK-NEXT: .cfi_offset b10, -40
+; CHECK-NEXT: .cfi_offset b11, -48
+; CHECK-NEXT: .cfi_offset b12, -56
+; CHECK-NEXT: .cfi_offset b13, -64
+; CHECK-NEXT: .cfi_offset b14, -72
+; CHECK-NEXT: .cfi_offset b15, -80
+; CHECK-NEXT: stp d1, d0, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: fneg d0, d1
+; CHECK-NEXT: str d0, [sp, #8] ; 8-byte Spill
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: ldp d2, d0, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT: ldr d1, [sp, #8] ; 8-byte Reload
+; CHECK-NEXT: fmov d3, d0
+; CHECK-NEXT: bl _streaming_callee_d
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: ldp x29, x30, [sp, #96] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #80] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #64] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #48] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #32] ; 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #112
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: .cfi_restore w29
+; CHECK-NEXT: .cfi_restore b8
+; CHECK-NEXT: .cfi_restore b9
+; CHECK-NEXT: .cfi_restore b10
+; CHECK-NEXT: .cfi_restore b11
+; CHECK-NEXT: .cfi_restore b12
+; CHECK-NEXT: .cfi_restore b13
+; CHECK-NEXT: .cfi_restore b14
+; CHECK-NEXT: .cfi_restore b15
+; CHECK-NEXT: ret
+entry:
+ %negs = fneg double %s
+ call void @streaming_callee_d(i64 %n, i64 %m, double %c, double %negs, double %s, double %c)
+ ret void
+}
+
+declare void @streaming_callee_f(i64, i64, float, float, float, float) "aarch64_pstate_sm_enabled"
+
+define void @fpr32_copy(i64 %n, i64 %m, float %c, float %s) "target-features"="+sme2" {
+; CHECK-LABEL: fpr32_copy:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: .cfi_def_cfa_offset 96
+; CHECK-NEXT: stp d15, d14, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_offset b8, -24
+; CHECK-NEXT: .cfi_offset b9, -32
+; CHECK-NEXT: .cfi_offset b10, -40
+; CHECK-NEXT: .cfi_offset b11, -48
+; CHECK-NEXT: .cfi_offset b12, -56
+; CHECK-NEXT: .cfi_offset b13, -64
+; CHECK-NEXT: .cfi_offset b14, -72
+; CHECK-NEXT: .cfi_offset b15, -80
+; CHECK-NEXT: stp s1, s0, [sp, #8] ; 8-byte Folded Spill
+; CHECK-NEXT: fneg s0, s1
+; CHECK-NEXT: str s0, [sp, #4] ; 4-byte Spill
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: ldp s2, s0, [sp, #8] ; 8-byte Folded Reload
+; CHECK-NEXT: ldr s1, [sp, #4] ; 4-byte Reload
+; CHECK-NEXT: fmov s3, s0
+; CHECK-NEXT: bl _streaming_callee_f
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #64] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #96
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: .cfi_restore w29
+; CHECK-NEXT: .cfi_restore b8
+; CHECK-NEXT: .cfi_restore b9
+; CHECK-NEXT: .cfi_restore b10
+; CHECK-NEXT: .cfi_restore b11
+; CHECK-NEXT: .cfi_restore b12
+; CHECK-NEXT: .cfi_restore b13
+; CHECK-NEXT: .cfi_restore b14
+; CHECK-NEXT: .cfi_restore b15
+; CHECK-NEXT: ret
+entry:
+ %negs = fneg float %s
+ call void @streaming_callee_f(i64 %n, i64 %m, float %c, float %negs, float %s, float %c)
+ ret void
+}
+
+declare void @streaming_callee_h(i64, i64, half, half, half, half) "aarch64_pstate_sm_enabled"
+
+define void @fpr16_copy(i64 %n, i64 %m, half %c, half %s) "target-features"="+sme2" {
+; CHECK-LABEL: fpr16_copy:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: sub sp, sp, #96
+; CHECK-NEXT: .cfi_def_cfa_offset 96
+; CHECK-NEXT: stp d15, d14, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_offset b8, -24
+; CHECK-NEXT: .cfi_offset b9, -32
+; CHECK-NEXT: .cfi_offset b10, -40
+; CHECK-NEXT: .cfi_offset b11, -48
+; CHECK-NEXT: .cfi_offset b12, -56
+; CHECK-NEXT: .cfi_offset b13, -64
+; CHECK-NEXT: .cfi_offset b14, -72
+; CHECK-NEXT: .cfi_offset b15, -80
+; CHECK-NEXT: str h1, [sp, #12] ; 2-byte Spill
+; CHECK-NEXT: str h0, [sp, #14] ; 2-byte Spill
+; CHECK-NEXT: fneg h0, h1
+; CHECK-NEXT: str h0, [sp, #10] ; 2-byte Spill
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: ldr h0, [sp, #14] ; 2-byte Reload
+; CHECK-NEXT: ldr h1, [sp, #10] ; 2-byte Reload
+; CHECK-NEXT: ldr h2, [sp, #12] ; 2-byte Reload
+; CHECK-NEXT: fmov s3, s0
+; CHECK-NEXT: bl _streaming_callee_h
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #64] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #96
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: .cfi_restore w29
+; CHECK-NEXT: .cfi_restore b8
+; CHECK-NEXT: .cfi_restore b9
+; CHECK-NEXT: .cfi_restore b10
+; CHECK-NEXT: .cfi_restore b11
+; CHECK-NEXT: .cfi_restore b12
+; CHECK-NEXT: .cfi_restore b13
+; CHECK-NEXT: .cfi_restore b14
+; CHECK-NEXT: .cfi_restore b15
+; CHECK-NEXT: ret
+entry:
+ %negs = fneg half %s
+ call void @streaming_callee_h(i64 %n, i64 %m, half %c, half %negs, half %s, half %c)
+ ret void
+}
+
+; Test mixed calls: normal -> streaming -> normal
+define void @mixed_calls(ptr %X, ptr %Y, <2 x double> %C, <2 x double> %S) "target-features"="+sme2" {
+; CHECK-LABEL: mixed_calls:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: sub sp, sp, #192
+; CHECK-NEXT: .cfi_def_cfa_offset 192
+; CHECK-NEXT: stp d15, d14, [sp, #96] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #112] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #128] ; 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #144] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #160] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #176] ; 16-byte Folded Spill
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_offset w19, -24
+; CHECK-NEXT: .cfi_offset w20, -32
+; CHECK-NEXT: .cfi_offset b8, -40
+; CHECK-NEXT: .cfi_offset b9, -48
+; CHECK-NEXT: .cfi_offset b10, -56
+; CHECK-NEXT: .cfi_offset b11, -64
+; CHECK-NEXT: .cfi_offset b12, -72
+; CHECK-NEXT: .cfi_offset b13, -80
+; CHECK-NEXT: .cfi_offset b14, -88
+; CHECK-NEXT: .cfi_offset b15, -96
+; CHECK-NEXT: mov.16b v2, v1
+; CHECK-NEXT: str q1, [sp, #80] ; 16-byte Spill
+; CHECK-NEXT: mov x19, x1
+; CHECK-NEXT: mov x20, x0
+; CHECK-NEXT: fneg.2d v1, v1
+; CHECK-NEXT: stp q0, q1, [sp, #48] ; 32-byte Folded Spill
+; CHECK-NEXT: mov.16b v3, v0
+; CHECK-NEXT: bl _normal_callee
+; CHECK-NEXT: ldr q4, [sp, #48] ; 16-byte Reload
+; CHECK-NEXT: ldp q0, q5, [sp, #64] ; 32-byte Folded Reload
+; CHECK-NEXT: stp q4, q5, [sp, #16] ; 32-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] ; 16-byte Spill
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: ldp q1, q0, [sp] ; 32-byte Folded Reload
+; CHECK-NEXT: ldr q2, [sp, #32] ; 16-byte Reload
+; CHECK-NEXT: mov z3.d, z0.d
+; CHECK-NEXT: bl _streaming_callee
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: mov x0, x20
+; CHECK-NEXT: mov x1, x19
+; CHECK-NEXT: ldp q0, q1, [sp, #48] ; 32-byte Folded Reload
+; CHECK-NEXT: ldr q2, [sp, #80] ; 16-byte Reload
+; CHECK-NEXT: mov.16b v3, v0
+; CHECK-NEXT: bl _normal_callee
+; CHECK-NEXT: ldp x29, x30, [sp, #176] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #160] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #144] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #128] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #112] ; 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #96] ; 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #192
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: .cfi_restore w29
+; CHECK-NEXT: .cfi_restore w19
+; CHECK-NEXT: .cfi_restore w20
+; CHECK-NEXT: .cfi_restore b8
+; CHECK-NEXT: .cfi_restore b9
+; CHECK-NEXT: .cfi_restore b10
+; CHECK-NEXT: .cfi_restore b11
+; CHECK-NEXT: .cfi_restore b12
+; CHECK-NEXT: .cfi_restore b13
+; CHECK-NEXT: .cfi_restore b14
+; CHECK-NEXT: .cfi_restore b15
+; CHECK-NEXT: ret
+entry:
+ %negS = fneg <2 x double> %S
+ ; First call - normal function
+ call void @normal_callee(ptr %X, ptr %Y, <2 x double> %C, <2 x double> %negS, <2 x double> %S, <2 x double> %C)
+ ; Second call - streaming function (requires smstart/smstop)
+ call void @streaming_callee(ptr %X, ptr %Y, <2 x double> %C, <2 x double> %negS, <2 x double> %S, <2 x double> %C)
+ ; Third call - normal function again
+ call void @normal_callee(ptr %X, ptr %Y, <2 x double> %C, <2 x double> %negS, <2 x double> %S, <2 x double> %C)
+ ret void
+}
>From 27bc4b40c2a32bb23db3a31bb3a36d8fd9236b2e Mon Sep 17 00:00:00 2001
From: Amara Emerson <amara at apple.com>
Date: Wed, 7 Jan 2026 02:53:04 -0800
Subject: [PATCH 2/5] formatting
---
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 9e9ef730b869c..1a99e380cfadc 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5702,7 +5702,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// In streaming regions, NEON is illegal but streaming-SVE is available.
// Use SVE for copies if we're in a streaming region and SME is available.
if ((Subtarget.isSVEorStreamingSVEAvailable() &&
- !Subtarget.isNeonAvailable()) ||
+ !Subtarget.isNeonAvailable()) ||
isInStreamingCallSiteRegion(MBB, I)) {
BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ))
.addReg(AArch64::Z0 + (DestReg - AArch64::Q0), RegState::Define)
>From 4a58b7466f5ba92cf84c0a63c78b76de88c4bda9 Mon Sep 17 00:00:00 2001
From: Amara Emerson <amara at apple.com>
Date: Wed, 7 Jan 2026 08:23:25 -0800
Subject: [PATCH 3/5] Address review: check hasSMEFA64() before avoiding NEON
copies
With +sme-fa64, NEON is legal in streaming mode, so we don't need
to switch to SVE copies in those cases.
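As a rough sketch, the combined gating condition now amounts to the
following (this is later factored into the mustAvoidNeonAtMBBI() helper in
a follow-up commit in this series):

  // Force SVE copies only when we're inside a call-site streaming region
  // and the target lacks SME-FA64 (which would make NEON legal there).
  bool MustAvoidNeon =
      !Subtarget.hasSMEFA64() && isInStreamingCallSiteRegion(MBB, I);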
---
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 1a99e380cfadc..c42bd7c14bc3d 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5701,9 +5701,10 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
AArch64::FPR128RegClass.contains(SrcReg)) {
// In streaming regions, NEON is illegal but streaming-SVE is available.
// Use SVE for copies if we're in a streaming region and SME is available.
+ // With +sme-fa64, NEON is legal in streaming mode so we can use it.
if ((Subtarget.isSVEorStreamingSVEAvailable() &&
!Subtarget.isNeonAvailable()) ||
- isInStreamingCallSiteRegion(MBB, I)) {
+ (!Subtarget.hasSMEFA64() && isInStreamingCallSiteRegion(MBB, I))) {
BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ))
.addReg(AArch64::Z0 + (DestReg - AArch64::Q0), RegState::Define)
.addReg(AArch64::Z0 + (SrcReg - AArch64::Q0))
@@ -5734,7 +5735,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
!Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable() &&
- !isInStreamingCallSiteRegion(MBB, I)) {
+ (!isInStreamingCallSiteRegion(MBB, I) || Subtarget.hasSMEFA64())) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
@@ -5762,7 +5763,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
!Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable() &&
- !isInStreamingCallSiteRegion(MBB, I)) {
+ (!isInStreamingCallSiteRegion(MBB, I) || Subtarget.hasSMEFA64())) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
@@ -5804,7 +5805,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
!Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable() &&
- !isInStreamingCallSiteRegion(MBB, I)) {
+ (!isInStreamingCallSiteRegion(MBB, I) || Subtarget.hasSMEFA64())) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
@@ -5846,7 +5847,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
!Subtarget.hasZeroCycleRegMoveFPR64() && Subtarget.isNeonAvailable() &&
- !isInStreamingCallSiteRegion(MBB, I)) {
+ (!isInStreamingCallSiteRegion(MBB, I) || Subtarget.hasSMEFA64())) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
>From dcaa9cbcadb6d5474cfe67b4598c734d9ce1ccf7 Mon Sep 17 00:00:00 2001
From: Amara Emerson <amara at apple.com>
Date: Fri, 9 Jan 2026 01:54:06 -0800
Subject: [PATCH 4/5] Add a mustAvoidNeonAtMBBI() wrapper to simplify logic.
---
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index c42bd7c14bc3d..20865d5b3a63a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5414,6 +5414,13 @@ static bool isInStreamingCallSiteRegion(MachineBasicBlock &MBB,
return false;
}
+/// Returns true if I is in a streaming call site region without SME-FA64.
+static bool mustAvoidNeonAtMBBI(const AArch64Subtarget &Subtarget,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) {
+ return isInStreamingCallSiteRegion(MBB, I) && !Subtarget.hasSMEFA64();
+}
+
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, Register DestReg,
@@ -5704,7 +5711,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
// With +sme-fa64, NEON is legal in streaming mode so we can use it.
if ((Subtarget.isSVEorStreamingSVEAvailable() &&
!Subtarget.isNeonAvailable()) ||
- (!Subtarget.hasSMEFA64() && isInStreamingCallSiteRegion(MBB, I))) {
+ mustAvoidNeonAtMBBI(Subtarget, MBB, I)) {
BuildMI(MBB, I, DL, get(AArch64::ORR_ZZZ))
.addReg(AArch64::Z0 + (DestReg - AArch64::Q0), RegState::Define)
.addReg(AArch64::Z0 + (SrcReg - AArch64::Q0))
@@ -5735,7 +5742,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
!Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable() &&
- (!isInStreamingCallSiteRegion(MBB, I) || Subtarget.hasSMEFA64())) {
+ !mustAvoidNeonAtMBBI(Subtarget, MBB, I)) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
@@ -5763,7 +5770,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
!Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable() &&
- (!isInStreamingCallSiteRegion(MBB, I) || Subtarget.hasSMEFA64())) {
+ !mustAvoidNeonAtMBBI(Subtarget, MBB, I)) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
@@ -5805,7 +5812,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
!Subtarget.hasZeroCycleRegMoveFPR32() && Subtarget.isNeonAvailable() &&
- (!isInStreamingCallSiteRegion(MBB, I) || Subtarget.hasSMEFA64())) {
+ !mustAvoidNeonAtMBBI(Subtarget, MBB, I)) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
@@ -5847,7 +5854,7 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
if (Subtarget.hasZeroCycleRegMoveFPR128() &&
!Subtarget.hasZeroCycleRegMoveFPR64() &&
!Subtarget.hasZeroCycleRegMoveFPR64() && Subtarget.isNeonAvailable() &&
- (!isInStreamingCallSiteRegion(MBB, I) || Subtarget.hasSMEFA64())) {
+ !mustAvoidNeonAtMBBI(Subtarget, MBB, I)) {
MCRegister DestRegQ = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
&AArch64::FPR128RegClass);
MCRegister SrcRegQ = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
>From 4a22a1c7da4a7aaebb0fae61c44c87f584e46418 Mon Sep 17 00:00:00 2001
From: Amara Emerson <amara at apple.com>
Date: Fri, 9 Jan 2026 02:13:31 -0800
Subject: [PATCH 5/5] Swap checks
Co-authored-by: Benjamin Maxwell <benjamin.maxwell at arm.com>
---
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 20865d5b3a63a..f04bbf248ca56 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5418,7 +5418,7 @@ static bool isInStreamingCallSiteRegion(MachineBasicBlock &MBB,
static bool mustAvoidNeonAtMBBI(const AArch64Subtarget &Subtarget,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) {
- return isInStreamingCallSiteRegion(MBB, I) && !Subtarget.hasSMEFA64();
+ return !Subtarget.hasSMEFA64() && isInStreamingCallSiteRegion(MBB, I);
}
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,