[llvm] [AArch64][GlobalISel] Fix incorrect codegen for FPR16/FPR8 to GPR copies (PR #171499)

Ian Butterworth via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 10 06:56:23 PST 2025


https://github.com/IanButterworth updated https://github.com/llvm/llvm-project/pull/171499

From 314a54a99c539bf0b683fde3bcb5609cd8997087 Mon Sep 17 00:00:00 2001
From: Ian Butterworth <i.r.butterworth at gmail.com>
Date: Tue, 9 Dec 2025 15:30:05 -0500
Subject: [PATCH 1/6] [AArch64][GlobalISel] Fix incorrect codegen for
 FPR16/FPR8 to GPR copies

Previously, copyPhysReg() was missing handlers for copies between
FPR16/FPR8 and GPR32/GPR64 register classes. These cases fell through
to the NZCV handler, which incorrectly generated 'mrs Rd, NZCV'
instead of the proper FMOV instruction.

This caused incorrect code generation for patterns like:
  %ival = bitcast half %val to i16
  store atomic i16 %ival, ptr %addr release, align 2

which generated 'mrs w8, NZCV' instead of 'fmov w8, h0'.
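
For reference, a hand-written sketch (register numbers taken from the
test added below; not authoritative llc output):

  ; miscompiled: reads the NZCV flags, not h0
  mrs   w8, NZCV
  stlrh w8, [x0]

  ; intended (with FullFP16)
  fmov  w8, h0
  stlrh w8, [x0]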

The fix adds proper copy handlers:
- FPR16 <-> GPR32: Use FMOVHWr/FMOVWHr with FullFP16, otherwise
  promote to FPR32 super-register and use FMOVSWr/FMOVWSr
- FPR16 <-> GPR64: Use FMOVHXr/FMOVXHr with FullFP16, otherwise
  promote to FPR64 super-register and use FMOVDXr/FMOVXDr
- FPR8 <-> GPR32: Promote to FPR32 and use FMOVSWr/FMOVWSr
- FPR8 <-> GPR64: Promote to FPR64 and use FMOVDXr/FMOVXDr

Fixes https://github.com/llvm/llvm-project/issues/171494

Co-authored-by: Claude <noreply at anthropic.com>
---
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp  |  96 +++++++++++++++
 .../GlobalISel/arm64-atomic-store-fp16.ll     | 109 ++++++++++++++++++
 2 files changed, 205 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index ac4c1acedf05c..fdab4766aac92 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5804,6 +5804,102 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     return;
   }
 
+  // Copies between GPR32 and FPR16.
+  if (AArch64::FPR16RegClass.contains(DestReg) &&
+      AArch64::GPR32RegClass.contains(SrcReg)) {
+    if (Subtarget.hasFullFP16()) {
+      BuildMI(MBB, I, DL, get(AArch64::FMOVWHr), DestReg)
+          .addReg(SrcReg, getKillRegState(KillSrc));
+    } else {
+      MCRegister DestRegS =
+          RI.getMatchingSuperReg(DestReg, AArch64::hsub, &AArch64::FPR32RegClass);
+      BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestRegS)
+          .addReg(SrcReg, getKillRegState(KillSrc));
+    }
+    return;
+  }
+  if (AArch64::GPR32RegClass.contains(DestReg) &&
+      AArch64::FPR16RegClass.contains(SrcReg)) {
+    if (Subtarget.hasFullFP16()) {
+      BuildMI(MBB, I, DL, get(AArch64::FMOVHWr), DestReg)
+          .addReg(SrcReg, getKillRegState(KillSrc));
+    } else {
+      MCRegister SrcRegS =
+          RI.getMatchingSuperReg(SrcReg, AArch64::hsub, &AArch64::FPR32RegClass);
+      BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
+          .addReg(SrcRegS, RegState::Undef)
+          .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
+    }
+    return;
+  }
+
+  // Copies between GPR64 and FPR16.
+  if (AArch64::FPR16RegClass.contains(DestReg) &&
+      AArch64::GPR64RegClass.contains(SrcReg)) {
+    if (Subtarget.hasFullFP16()) {
+      BuildMI(MBB, I, DL, get(AArch64::FMOVXHr), DestReg)
+          .addReg(SrcReg, getKillRegState(KillSrc));
+    } else {
+      MCRegister DestRegD =
+          RI.getMatchingSuperReg(DestReg, AArch64::hsub, &AArch64::FPR64RegClass);
+      BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestRegD)
+          .addReg(SrcReg, getKillRegState(KillSrc));
+    }
+    return;
+  }
+  if (AArch64::GPR64RegClass.contains(DestReg) &&
+      AArch64::FPR16RegClass.contains(SrcReg)) {
+    if (Subtarget.hasFullFP16()) {
+      BuildMI(MBB, I, DL, get(AArch64::FMOVHXr), DestReg)
+          .addReg(SrcReg, getKillRegState(KillSrc));
+    } else {
+      MCRegister SrcRegD =
+          RI.getMatchingSuperReg(SrcReg, AArch64::hsub, &AArch64::FPR64RegClass);
+      BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
+          .addReg(SrcRegD, RegState::Undef)
+          .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
+    }
+    return;
+  }
+
+  // Copies between GPR32 and FPR8.
+  if (AArch64::FPR8RegClass.contains(DestReg) &&
+      AArch64::GPR32RegClass.contains(SrcReg)) {
+    MCRegister DestRegS =
+        RI.getMatchingSuperReg(DestReg, AArch64::bsub, &AArch64::FPR32RegClass);
+    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestRegS)
+        .addReg(SrcReg, getKillRegState(KillSrc));
+    return;
+  }
+  if (AArch64::GPR32RegClass.contains(DestReg) &&
+      AArch64::FPR8RegClass.contains(SrcReg)) {
+    MCRegister SrcRegS =
+        RI.getMatchingSuperReg(SrcReg, AArch64::bsub, &AArch64::FPR32RegClass);
+    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
+        .addReg(SrcRegS, RegState::Undef)
+        .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
+    return;
+  }
+
+  // Copies between GPR64 and FPR8.
+  if (AArch64::FPR8RegClass.contains(DestReg) &&
+      AArch64::GPR64RegClass.contains(SrcReg)) {
+    MCRegister DestRegD =
+        RI.getMatchingSuperReg(DestReg, AArch64::bsub, &AArch64::FPR64RegClass);
+    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestRegD)
+        .addReg(SrcReg, getKillRegState(KillSrc));
+    return;
+  }
+  if (AArch64::GPR64RegClass.contains(DestReg) &&
+      AArch64::FPR8RegClass.contains(SrcReg)) {
+    MCRegister SrcRegD =
+        RI.getMatchingSuperReg(SrcReg, AArch64::bsub, &AArch64::FPR64RegClass);
+    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
+        .addReg(SrcRegD, RegState::Undef)
+        .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
+    return;
+  }
+
   if (DestReg == AArch64::NZCV) {
     assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
     BuildMI(MBB, I, DL, get(AArch64::MSR))
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
new file mode 100644
index 0000000000000..0109b4d7c4c08
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=arm64-apple-ios -global-isel -global-isel-abort=1 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-NOFP16
+; RUN: llc < %s -mtriple=arm64-apple-ios -mattr=+fullfp16 -global-isel -global-isel-abort=1 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-FP16
+
+; Test for https://github.com/llvm/llvm-project/issues/171494
+; Atomic store of bitcast half to i16 was generating incorrect code (mrs instead of fmov).
+
+define void @atomic_store_half(ptr %addr, half %val) {
+; CHECK-NOFP16-LABEL: atomic_store_half:
+; CHECK-NOFP16:       ; %bb.0:
+; CHECK-NOFP16-NEXT:    fmov w8, s0
+; CHECK-NOFP16-NEXT:    stlrh w8, [x0]
+; CHECK-NOFP16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: atomic_store_half:
+; CHECK-FP16:       ; %bb.0:
+; CHECK-FP16-NEXT:    fmov w8, h0
+; CHECK-FP16-NEXT:    stlrh w8, [x0]
+; CHECK-FP16-NEXT:    ret
+  %ival = bitcast half %val to i16
+  store atomic i16 %ival, ptr %addr release, align 2
+  ret void
+}
+
+define half @atomic_load_half(ptr %addr) {
+; CHECK-NOFP16-LABEL: atomic_load_half:
+; CHECK-NOFP16:       ; %bb.0:
+; CHECK-NOFP16-NEXT:    ldarh w8, [x0]
+; CHECK-NOFP16-NEXT:    fmov s0, w8
+; CHECK-NOFP16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: atomic_load_half:
+; CHECK-FP16:       ; %bb.0:
+; CHECK-FP16-NEXT:    ldarh w8, [x0]
+; CHECK-FP16-NEXT:    fmov h0, w8
+; CHECK-FP16-NEXT:    ret
+  %ival = load atomic i16, ptr %addr acquire, align 2
+  %val = bitcast i16 %ival to half
+  ret half %val
+}
+
+define void @atomic_store_bfloat(ptr %addr, bfloat %val) {
+; CHECK-NOFP16-LABEL: atomic_store_bfloat:
+; CHECK-NOFP16:       ; %bb.0:
+; CHECK-NOFP16-NEXT:    fmov w8, s0
+; CHECK-NOFP16-NEXT:    stlrh w8, [x0]
+; CHECK-NOFP16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: atomic_store_bfloat:
+; CHECK-FP16:       ; %bb.0:
+; CHECK-FP16-NEXT:    fmov w8, h0
+; CHECK-FP16-NEXT:    stlrh w8, [x0]
+; CHECK-FP16-NEXT:    ret
+  %ival = bitcast bfloat %val to i16
+  store atomic i16 %ival, ptr %addr release, align 2
+  ret void
+}
+
+define bfloat @atomic_load_bfloat(ptr %addr) {
+; CHECK-NOFP16-LABEL: atomic_load_bfloat:
+; CHECK-NOFP16:       ; %bb.0:
+; CHECK-NOFP16-NEXT:    ldarh w8, [x0]
+; CHECK-NOFP16-NEXT:    fmov s0, w8
+; CHECK-NOFP16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: atomic_load_bfloat:
+; CHECK-FP16:       ; %bb.0:
+; CHECK-FP16-NEXT:    ldarh w8, [x0]
+; CHECK-FP16-NEXT:    fmov h0, w8
+; CHECK-FP16-NEXT:    ret
+  %ival = load atomic i16, ptr %addr acquire, align 2
+  %val = bitcast i16 %ival to bfloat
+  ret bfloat %val
+}
+
+; Test FPR8 to GPR32 copies (bitcast <1 x i8> to i8 for atomic store)
+define void @atomic_store_v1i8(ptr %addr, <1 x i8> %val) {
+; CHECK-NOFP16-LABEL: atomic_store_v1i8:
+; CHECK-NOFP16:       ; %bb.0:
+; CHECK-NOFP16-NEXT:    fmov w8, s0
+; CHECK-NOFP16-NEXT:    stlrb w8, [x0]
+; CHECK-NOFP16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: atomic_store_v1i8:
+; CHECK-FP16:       ; %bb.0:
+; CHECK-FP16-NEXT:    fmov w8, s0
+; CHECK-FP16-NEXT:    stlrb w8, [x0]
+; CHECK-FP16-NEXT:    ret
+  %ival = bitcast <1 x i8> %val to i8
+  store atomic i8 %ival, ptr %addr release, align 1
+  ret void
+}
+
+define <1 x i8> @atomic_load_v1i8(ptr %addr) {
+; CHECK-NOFP16-LABEL: atomic_load_v1i8:
+; CHECK-NOFP16:       ; %bb.0:
+; CHECK-NOFP16-NEXT:    ldarb w8, [x0]
+; CHECK-NOFP16-NEXT:    fmov s0, w8
+; CHECK-NOFP16-NEXT:    ret
+;
+; CHECK-FP16-LABEL: atomic_load_v1i8:
+; CHECK-FP16:       ; %bb.0:
+; CHECK-FP16-NEXT:    ldarb w8, [x0]
+; CHECK-FP16-NEXT:    fmov s0, w8
+; CHECK-FP16-NEXT:    ret
+  %ival = load atomic i8, ptr %addr acquire, align 1
+  %val = bitcast i8 %ival to <1 x i8>
+  ret <1 x i8> %val
+}

From c2d186fc1a72a674783e3658c90372c4b2094682 Mon Sep 17 00:00:00 2001
From: Ian Butterworth <i.r.butterworth at gmail.com>
Date: Tue, 9 Dec 2025 19:20:23 -0500
Subject: [PATCH 2/6] Fix test expectations and code formatting

- Fix LLVM code style: use trailing parameter alignment for
  getMatchingSuperReg calls instead of assignment-aligned continuation
- Update CHECK-FP16 expectations for atomic_load_half/bfloat: GlobalISel
  allocates FPR32 (s0) for GPR->FPR copies even with FullFP16, then
  narrows to h0 via a kill annotation. The FullFP16 optimization only
  applies in the FPR->GPR (store) direction, where the value arrives in h0.
- Add kill annotations to CHECK-NOFP16 load tests to match actual output

Co-authored-by: Claude <noreply at anthropic.com>
---
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp     | 16 ++++++++--------
 .../GlobalISel/arm64-atomic-store-fp16.ll        |  8 ++++++--
 2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index fdab4766aac92..e6204bb99e52f 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5811,8 +5811,8 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
       BuildMI(MBB, I, DL, get(AArch64::FMOVWHr), DestReg)
           .addReg(SrcReg, getKillRegState(KillSrc));
     } else {
-      MCRegister DestRegS =
-          RI.getMatchingSuperReg(DestReg, AArch64::hsub, &AArch64::FPR32RegClass);
+      MCRegister DestRegS = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
+                                                   &AArch64::FPR32RegClass);
       BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestRegS)
           .addReg(SrcReg, getKillRegState(KillSrc));
     }
@@ -5824,8 +5824,8 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
       BuildMI(MBB, I, DL, get(AArch64::FMOVHWr), DestReg)
           .addReg(SrcReg, getKillRegState(KillSrc));
     } else {
-      MCRegister SrcRegS =
-          RI.getMatchingSuperReg(SrcReg, AArch64::hsub, &AArch64::FPR32RegClass);
+      MCRegister SrcRegS = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
+                                                  &AArch64::FPR32RegClass);
       BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
           .addReg(SrcRegS, RegState::Undef)
           .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
@@ -5840,8 +5840,8 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
       BuildMI(MBB, I, DL, get(AArch64::FMOVXHr), DestReg)
           .addReg(SrcReg, getKillRegState(KillSrc));
     } else {
-      MCRegister DestRegD =
-          RI.getMatchingSuperReg(DestReg, AArch64::hsub, &AArch64::FPR64RegClass);
+      MCRegister DestRegD = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
+                                                   &AArch64::FPR64RegClass);
       BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestRegD)
           .addReg(SrcReg, getKillRegState(KillSrc));
     }
@@ -5853,8 +5853,8 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
       BuildMI(MBB, I, DL, get(AArch64::FMOVHXr), DestReg)
           .addReg(SrcReg, getKillRegState(KillSrc));
     } else {
-      MCRegister SrcRegD =
-          RI.getMatchingSuperReg(SrcReg, AArch64::hsub, &AArch64::FPR64RegClass);
+      MCRegister SrcRegD = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
+                                                  &AArch64::FPR64RegClass);
       BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
           .addReg(SrcRegD, RegState::Undef)
           .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
index 0109b4d7c4c08..8839165c81ca3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
@@ -27,12 +27,14 @@ define half @atomic_load_half(ptr %addr) {
 ; CHECK-NOFP16:       ; %bb.0:
 ; CHECK-NOFP16-NEXT:    ldarh w8, [x0]
 ; CHECK-NOFP16-NEXT:    fmov s0, w8
+; CHECK-NOFP16-NEXT:    ; kill: def $h0 killed $h0 killed $s0
 ; CHECK-NOFP16-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: atomic_load_half:
 ; CHECK-FP16:       ; %bb.0:
 ; CHECK-FP16-NEXT:    ldarh w8, [x0]
-; CHECK-FP16-NEXT:    fmov h0, w8
+; CHECK-FP16-NEXT:    fmov s0, w8
+; CHECK-FP16-NEXT:    ; kill: def $h0 killed $h0 killed $s0
 ; CHECK-FP16-NEXT:    ret
   %ival = load atomic i16, ptr %addr acquire, align 2
   %val = bitcast i16 %ival to half
@@ -61,12 +63,14 @@ define bfloat @atomic_load_bfloat(ptr %addr) {
 ; CHECK-NOFP16:       ; %bb.0:
 ; CHECK-NOFP16-NEXT:    ldarh w8, [x0]
 ; CHECK-NOFP16-NEXT:    fmov s0, w8
+; CHECK-NOFP16-NEXT:    ; kill: def $h0 killed $h0 killed $s0
 ; CHECK-NOFP16-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: atomic_load_bfloat:
 ; CHECK-FP16:       ; %bb.0:
 ; CHECK-FP16-NEXT:    ldarh w8, [x0]
-; CHECK-FP16-NEXT:    fmov h0, w8
+; CHECK-FP16-NEXT:    fmov s0, w8
+; CHECK-FP16-NEXT:    ; kill: def $h0 killed $h0 killed $s0
 ; CHECK-FP16-NEXT:    ret
   %ival = load atomic i16, ptr %addr acquire, align 2
   %val = bitcast i16 %ival to bfloat

From 984a9771b84e88124db40318d6c3cf11cdb6abc8 Mon Sep 17 00:00:00 2001
From: Ian Butterworth <i.r.butterworth at gmail.com>
Date: Wed, 10 Dec 2025 02:50:53 -0500
Subject: [PATCH 3/6] [AArch64][GlobalISel] Alternative fix: widen atomic
 stores in legalizer

Per review suggestion from @xal-0, handle the atomic store issue in the
legalizer instead of copyPhysReg. This adds a widenScalarIf rule for
G_STORE that widens scalar types narrower than 32 bits to s32 for atomic
stores with release ordering or stronger (sketched after the list below).

This approach:
- Fixes the original bug (mrs instead of fmov for the half->i16 atomic store)
- Enables stlurb/stlurh codegen for GISel (removes two TODOs)
- Handles the type change in a more appropriate place than copyPhysReg
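
As a rough illustration (hand-written MIR, under an assumption about how
widenScalar handles G_STORE; not tool output), the rule rewrites a
sub-32-bit release store into an any-extend feeding a truncating store:

  ; before legalization
  G_STORE %val(s16), %ptr(p0) :: (store release (s16))

  ; after the rule fires: the memory size stays s16, so the store is
  ; truncating and can select to stlrh from a GPR32
  %wide:_(s32) = G_ANYEXT %val(s16)
  G_STORE %wide(s32), %ptr(p0) :: (store release (s16))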

Co-authored-by: Claude <noreply at anthropic.com>
---
 llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp    | 4 ++++
 .../AArch64/Atomics/aarch64-atomic-store-rcpc_immo.ll     | 8 ++------
 .../CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll | 4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 44a148940ec96..1761b356b6558 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -568,6 +568,10 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
         return Query.Types[0] == s128 &&
                Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
       })
+      .widenScalarIf(
+          all(scalarNarrowerThan(0, 32),
+              atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Release)),
+          changeTo(0, s32))
       .legalForTypesWithMemDesc(
           {{s8, p0, s8, 8},     {s16, p0, s8, 8},  // truncstorei8 from s16
            {s32, p0, s8, 8},                       // truncstorei8 from s32
diff --git a/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-store-rcpc_immo.ll b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-store-rcpc_immo.ll
index c80b18d178883..de12866fc2f4b 100644
--- a/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-store-rcpc_immo.ll
+++ b/llvm/test/CodeGen/AArch64/Atomics/aarch64-atomic-store-rcpc_immo.ll
@@ -357,12 +357,10 @@ define void @store_atomic_i128_unaligned_seq_cst(i128 %value, ptr %ptr) {
     ret void
 }
 
-; TODO: missed opportunity to emit a stlurb w/ GISel
 define void @store_atomic_i8_from_gep() {
 ; GISEL-LABEL: store_atomic_i8_from_gep:
 ; GISEL:    bl init
-; GISEL:    add x9, x8, #1
-; GISEL:    stlrb w8, [x9]
+; GISEL:    stlurb w8, [x9, #1]
 ;
 ; SDAG-LABEL: store_atomic_i8_from_gep:
 ; SDAG:    bl init
@@ -374,12 +372,10 @@ define void @store_atomic_i8_from_gep() {
   ret void
 }
 
-; TODO: missed opportunity to emit a stlurh w/ GISel
 define void @store_atomic_i16_from_gep() {
 ; GISEL-LABEL: store_atomic_i16_from_gep:
 ; GISEL:    bl init
-; GISEL:    add x9, x8, #2
-; GISEL:    stlrh w8, [x9]
+; GISEL:    stlurh w8, [x9, #2]
 ;
 ; SDAG-LABEL: store_atomic_i16_from_gep:
 ; SDAG:    bl init
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
index 8839165c81ca3..0254359a7db86 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
@@ -14,7 +14,7 @@ define void @atomic_store_half(ptr %addr, half %val) {
 ;
 ; CHECK-FP16-LABEL: atomic_store_half:
 ; CHECK-FP16:       ; %bb.0:
-; CHECK-FP16-NEXT:    fmov w8, h0
+; CHECK-FP16-NEXT:    fmov w8, s0
 ; CHECK-FP16-NEXT:    stlrh w8, [x0]
 ; CHECK-FP16-NEXT:    ret
   %ival = bitcast half %val to i16
@@ -50,7 +50,7 @@ define void @atomic_store_bfloat(ptr %addr, bfloat %val) {
 ;
 ; CHECK-FP16-LABEL: atomic_store_bfloat:
 ; CHECK-FP16:       ; %bb.0:
-; CHECK-FP16-NEXT:    fmov w8, h0
+; CHECK-FP16-NEXT:    fmov w8, s0
 ; CHECK-FP16-NEXT:    stlrh w8, [x0]
 ; CHECK-FP16-NEXT:    ret
   %ival = bitcast bfloat %val to i16

From 07ca6661f81706a9ef0ae3a25864db26c24616a8 Mon Sep 17 00:00:00 2001
From: Ian Butterworth <i.r.butterworth at gmail.com>
Date: Wed, 10 Dec 2025 09:22:09 -0500
Subject: [PATCH 4/6] Fix test expectations to match actual codegen

- Add kill annotations for store tests (def $h0 killed $h0 def $s0)
- Remove v1i8 tests: they use umov.b (vector extract), not the
  FPR8->GPR copy path, so they don't test the fixes in this PR

Co-authored-by: Claude <noreply at anthropic.com>
---
 .../GlobalISel/arm64-atomic-store-fp16.ll     | 39 ++-----------------
 1 file changed, 4 insertions(+), 35 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
index 0254359a7db86..a570e2eb592d5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
@@ -8,12 +8,14 @@
 define void @atomic_store_half(ptr %addr, half %val) {
 ; CHECK-NOFP16-LABEL: atomic_store_half:
 ; CHECK-NOFP16:       ; %bb.0:
+; CHECK-NOFP16-NEXT:    ; kill: def $h0 killed $h0 def $s0
 ; CHECK-NOFP16-NEXT:    fmov w8, s0
 ; CHECK-NOFP16-NEXT:    stlrh w8, [x0]
 ; CHECK-NOFP16-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: atomic_store_half:
 ; CHECK-FP16:       ; %bb.0:
+; CHECK-FP16-NEXT:    ; kill: def $h0 killed $h0 def $s0
 ; CHECK-FP16-NEXT:    fmov w8, s0
 ; CHECK-FP16-NEXT:    stlrh w8, [x0]
 ; CHECK-FP16-NEXT:    ret
@@ -44,12 +46,14 @@ define half @atomic_load_half(ptr %addr) {
 define void @atomic_store_bfloat(ptr %addr, bfloat %val) {
 ; CHECK-NOFP16-LABEL: atomic_store_bfloat:
 ; CHECK-NOFP16:       ; %bb.0:
+; CHECK-NOFP16-NEXT:    ; kill: def $h0 killed $h0 def $s0
 ; CHECK-NOFP16-NEXT:    fmov w8, s0
 ; CHECK-NOFP16-NEXT:    stlrh w8, [x0]
 ; CHECK-NOFP16-NEXT:    ret
 ;
 ; CHECK-FP16-LABEL: atomic_store_bfloat:
 ; CHECK-FP16:       ; %bb.0:
+; CHECK-FP16-NEXT:    ; kill: def $h0 killed $h0 def $s0
 ; CHECK-FP16-NEXT:    fmov w8, s0
 ; CHECK-FP16-NEXT:    stlrh w8, [x0]
 ; CHECK-FP16-NEXT:    ret
@@ -76,38 +80,3 @@ define bfloat @atomic_load_bfloat(ptr %addr) {
   %val = bitcast i16 %ival to bfloat
   ret bfloat %val
 }
-
-; Test FPR8 to GPR32 copies (bitcast <1 x i8> to i8 for atomic store)
-define void @atomic_store_v1i8(ptr %addr, <1 x i8> %val) {
-; CHECK-NOFP16-LABEL: atomic_store_v1i8:
-; CHECK-NOFP16:       ; %bb.0:
-; CHECK-NOFP16-NEXT:    fmov w8, s0
-; CHECK-NOFP16-NEXT:    stlrb w8, [x0]
-; CHECK-NOFP16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: atomic_store_v1i8:
-; CHECK-FP16:       ; %bb.0:
-; CHECK-FP16-NEXT:    fmov w8, s0
-; CHECK-FP16-NEXT:    stlrb w8, [x0]
-; CHECK-FP16-NEXT:    ret
-  %ival = bitcast <1 x i8> %val to i8
-  store atomic i8 %ival, ptr %addr release, align 1
-  ret void
-}
-
-define <1 x i8> @atomic_load_v1i8(ptr %addr) {
-; CHECK-NOFP16-LABEL: atomic_load_v1i8:
-; CHECK-NOFP16:       ; %bb.0:
-; CHECK-NOFP16-NEXT:    ldarb w8, [x0]
-; CHECK-NOFP16-NEXT:    fmov s0, w8
-; CHECK-NOFP16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: atomic_load_v1i8:
-; CHECK-FP16:       ; %bb.0:
-; CHECK-FP16-NEXT:    ldarb w8, [x0]
-; CHECK-FP16-NEXT:    fmov s0, w8
-; CHECK-FP16-NEXT:    ret
-  %ival = load atomic i8, ptr %addr acquire, align 1
-  %val = bitcast i8 %ival to <1 x i8>
-  ret <1 x i8> %val
-}

From ab2bd338b4e470c684d02e095e8ab3b2110c53ea Mon Sep 17 00:00:00 2001
From: Ian Butterworth <i.r.butterworth at gmail.com>
Date: Wed, 10 Dec 2025 09:52:13 -0500
Subject: [PATCH 5/6] Address reviewer feedback

- Remove copyPhysReg changes: per @arsenm and @davemgreen, copyPhysReg
  shouldn't need to handle new situations for GlobalISel, and copies
  should only be between elements of the same size
- Move widenScalarIf rule to end of G_STORE legalizer chain: per @arsenm,
  legal cases should be listed first, modifying actions should be near end
- Remove -verify-machineinstrs from test RUN lines: per @arsenm suggestion

Co-authored-by: Claude <noreply at anthropic.com>
---
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp  | 96 -------------------
 .../AArch64/GISel/AArch64LegalizerInfo.cpp    |  8 +-
 .../GlobalISel/arm64-atomic-store-fp16.ll     |  4 +-
 3 files changed, 6 insertions(+), 102 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index e6204bb99e52f..ac4c1acedf05c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5804,102 +5804,6 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     return;
   }
 
-  // Copies between GPR32 and FPR16.
-  if (AArch64::FPR16RegClass.contains(DestReg) &&
-      AArch64::GPR32RegClass.contains(SrcReg)) {
-    if (Subtarget.hasFullFP16()) {
-      BuildMI(MBB, I, DL, get(AArch64::FMOVWHr), DestReg)
-          .addReg(SrcReg, getKillRegState(KillSrc));
-    } else {
-      MCRegister DestRegS = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
-                                                   &AArch64::FPR32RegClass);
-      BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestRegS)
-          .addReg(SrcReg, getKillRegState(KillSrc));
-    }
-    return;
-  }
-  if (AArch64::GPR32RegClass.contains(DestReg) &&
-      AArch64::FPR16RegClass.contains(SrcReg)) {
-    if (Subtarget.hasFullFP16()) {
-      BuildMI(MBB, I, DL, get(AArch64::FMOVHWr), DestReg)
-          .addReg(SrcReg, getKillRegState(KillSrc));
-    } else {
-      MCRegister SrcRegS = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
-                                                  &AArch64::FPR32RegClass);
-      BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
-          .addReg(SrcRegS, RegState::Undef)
-          .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
-    }
-    return;
-  }
-
-  // Copies between GPR64 and FPR16.
-  if (AArch64::FPR16RegClass.contains(DestReg) &&
-      AArch64::GPR64RegClass.contains(SrcReg)) {
-    if (Subtarget.hasFullFP16()) {
-      BuildMI(MBB, I, DL, get(AArch64::FMOVXHr), DestReg)
-          .addReg(SrcReg, getKillRegState(KillSrc));
-    } else {
-      MCRegister DestRegD = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
-                                                   &AArch64::FPR64RegClass);
-      BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestRegD)
-          .addReg(SrcReg, getKillRegState(KillSrc));
-    }
-    return;
-  }
-  if (AArch64::GPR64RegClass.contains(DestReg) &&
-      AArch64::FPR16RegClass.contains(SrcReg)) {
-    if (Subtarget.hasFullFP16()) {
-      BuildMI(MBB, I, DL, get(AArch64::FMOVHXr), DestReg)
-          .addReg(SrcReg, getKillRegState(KillSrc));
-    } else {
-      MCRegister SrcRegD = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
-                                                  &AArch64::FPR64RegClass);
-      BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
-          .addReg(SrcRegD, RegState::Undef)
-          .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
-    }
-    return;
-  }
-
-  // Copies between GPR32 and FPR8.
-  if (AArch64::FPR8RegClass.contains(DestReg) &&
-      AArch64::GPR32RegClass.contains(SrcReg)) {
-    MCRegister DestRegS =
-        RI.getMatchingSuperReg(DestReg, AArch64::bsub, &AArch64::FPR32RegClass);
-    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestRegS)
-        .addReg(SrcReg, getKillRegState(KillSrc));
-    return;
-  }
-  if (AArch64::GPR32RegClass.contains(DestReg) &&
-      AArch64::FPR8RegClass.contains(SrcReg)) {
-    MCRegister SrcRegS =
-        RI.getMatchingSuperReg(SrcReg, AArch64::bsub, &AArch64::FPR32RegClass);
-    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
-        .addReg(SrcRegS, RegState::Undef)
-        .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
-    return;
-  }
-
-  // Copies between GPR64 and FPR8.
-  if (AArch64::FPR8RegClass.contains(DestReg) &&
-      AArch64::GPR64RegClass.contains(SrcReg)) {
-    MCRegister DestRegD =
-        RI.getMatchingSuperReg(DestReg, AArch64::bsub, &AArch64::FPR64RegClass);
-    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestRegD)
-        .addReg(SrcReg, getKillRegState(KillSrc));
-    return;
-  }
-  if (AArch64::GPR64RegClass.contains(DestReg) &&
-      AArch64::FPR8RegClass.contains(SrcReg)) {
-    MCRegister SrcRegD =
-        RI.getMatchingSuperReg(SrcReg, AArch64::bsub, &AArch64::FPR64RegClass);
-    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
-        .addReg(SrcRegD, RegState::Undef)
-        .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
-    return;
-  }
-
   if (DestReg == AArch64::NZCV) {
     assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
     BuildMI(MBB, I, DL, get(AArch64::MSR))
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 1761b356b6558..80d691903880c 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -568,10 +568,6 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
         return Query.Types[0] == s128 &&
                Query.MMODescrs[0].Ordering != AtomicOrdering::NotAtomic;
       })
-      .widenScalarIf(
-          all(scalarNarrowerThan(0, 32),
-              atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Release)),
-          changeTo(0, s32))
       .legalForTypesWithMemDesc(
           {{s8, p0, s8, 8},     {s16, p0, s8, 8},  // truncstorei8 from s16
            {s32, p0, s8, 8},                       // truncstorei8 from s32
@@ -619,6 +615,10 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .customIf(IsPtrVecPred)
       .scalarizeIf(typeInSet(0, {v2s16, v2s8}), 0)
       .scalarizeIf(scalarOrEltWiderThan(0, 64), 0)
+      .widenScalarIf(
+          all(scalarNarrowerThan(0, 32),
+              atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Release)),
+          changeTo(0, s32))
       .lower();
 
   getActionDefinitionsBuilder(G_INDEXED_STORE)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
index a570e2eb592d5..5273a6560f2b7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=arm64-apple-ios -global-isel -global-isel-abort=1 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-NOFP16
-; RUN: llc < %s -mtriple=arm64-apple-ios -mattr=+fullfp16 -global-isel -global-isel-abort=1 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-FP16
+; RUN: llc < %s -mtriple=arm64-apple-ios -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=CHECK-NOFP16
+; RUN: llc < %s -mtriple=arm64-apple-ios -mattr=+fullfp16 -global-isel -global-isel-abort=1 | FileCheck %s --check-prefix=CHECK-FP16
 
 ; Test for https://github.com/llvm/llvm-project/issues/171494
 ; Atomic store of bitcast half to i16 was generating incorrect code (mrs instead of fmov).

From 07c8acc762012742015cff8703428f2ccc41171d Mon Sep 17 00:00:00 2001
From: Ian Butterworth <i.r.butterworth at gmail.com>
Date: Wed, 10 Dec 2025 09:56:07 -0500
Subject: [PATCH 6/6] Remove atomic load tests that don't exercise the fix

The legalizer widenScalarIf rule only applies to G_STORE with
release-or-stronger ordering, so the atomic_load_half and
atomic_load_bfloat tests don't actually exercise the fix.

Co-authored-by: Claude <noreply at anthropic.com>
---
 .../GlobalISel/arm64-atomic-store-fp16.ll     | 38 -------------------
 1 file changed, 38 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
index 5273a6560f2b7..6156b2587df68 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-store-fp16.ll
@@ -24,25 +24,6 @@ define void @atomic_store_half(ptr %addr, half %val) {
   ret void
 }
 
-define half @atomic_load_half(ptr %addr) {
-; CHECK-NOFP16-LABEL: atomic_load_half:
-; CHECK-NOFP16:       ; %bb.0:
-; CHECK-NOFP16-NEXT:    ldarh w8, [x0]
-; CHECK-NOFP16-NEXT:    fmov s0, w8
-; CHECK-NOFP16-NEXT:    ; kill: def $h0 killed $h0 killed $s0
-; CHECK-NOFP16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: atomic_load_half:
-; CHECK-FP16:       ; %bb.0:
-; CHECK-FP16-NEXT:    ldarh w8, [x0]
-; CHECK-FP16-NEXT:    fmov s0, w8
-; CHECK-FP16-NEXT:    ; kill: def $h0 killed $h0 killed $s0
-; CHECK-FP16-NEXT:    ret
-  %ival = load atomic i16, ptr %addr acquire, align 2
-  %val = bitcast i16 %ival to half
-  ret half %val
-}
-
 define void @atomic_store_bfloat(ptr %addr, bfloat %val) {
 ; CHECK-NOFP16-LABEL: atomic_store_bfloat:
 ; CHECK-NOFP16:       ; %bb.0:
@@ -61,22 +42,3 @@ define void @atomic_store_bfloat(ptr %addr, bfloat %val) {
   store atomic i16 %ival, ptr %addr release, align 2
   ret void
 }
-
-define bfloat @atomic_load_bfloat(ptr %addr) {
-; CHECK-NOFP16-LABEL: atomic_load_bfloat:
-; CHECK-NOFP16:       ; %bb.0:
-; CHECK-NOFP16-NEXT:    ldarh w8, [x0]
-; CHECK-NOFP16-NEXT:    fmov s0, w8
-; CHECK-NOFP16-NEXT:    ; kill: def $h0 killed $h0 killed $s0
-; CHECK-NOFP16-NEXT:    ret
-;
-; CHECK-FP16-LABEL: atomic_load_bfloat:
-; CHECK-FP16:       ; %bb.0:
-; CHECK-FP16-NEXT:    ldarh w8, [x0]
-; CHECK-FP16-NEXT:    fmov s0, w8
-; CHECK-FP16-NEXT:    ; kill: def $h0 killed $h0 killed $s0
-; CHECK-FP16-NEXT:    ret
-  %ival = load atomic i16, ptr %addr acquire, align 2
-  %val = bitcast i16 %ival to bfloat
-  ret bfloat %val
-}


