[llvm] [AArch64] Fix regression from “Fold scalar-to-vector shuffles into DUP/FMOV” (PR #178227)

Amina Chabane via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 19 09:26:06 PST 2026


https://github.com/Amichaxx updated https://github.com/llvm/llvm-project/pull/178227

>From 4bd80c5606547f5c5f63a2601642233babff24b7 Mon Sep 17 00:00:00 2001
From: Amichaxx <amina.chabane at arm.com>
Date: Tue, 27 Jan 2026 14:09:01 +0000
Subject: [PATCH 1/7] =?UTF-8?q?[AArch64]=20Fix=20regression=20from=20?=
 =?UTF-8?q?=E2=80=9CFold=20scalar-to-vector=20shuffles=20into=20DUP/FMOV?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch fixes the original compile-time regression by restricting the optimisation to run only on non-constant splats.
Without this guard, an infinite loop occurs because the CONCAT(SCALAR_TO_VECTOR, zero) folds back into the same BUILD_VECTOR and
immediately re-enters LowerBUILD_VECTOR.

This patch was tested with the original TensorFlow reproduction provided on the PR and shows a (very) slight improvement in
compile time.
---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 27 ++++++
 .../Target/AArch64/AArch64MIPeepholeOpt.cpp   | 51 ++++++++++-
 llvm/test/CodeGen/AArch64/aarch64-addv.ll     |  9 +-
 .../AArch64/aarch64-matrix-umull-smull.ll     | 15 ++-
 .../CodeGen/AArch64/arm64-vector-insertion.ll |  5 +-
 llvm/test/CodeGen/AArch64/bitcast-extend.ll   |  3 +-
 llvm/test/CodeGen/AArch64/ctpop.ll            |  3 +-
 llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll | 48 +++++-----
 .../implicitly-set-zero-high-64-bits.ll       |  3 -
 .../AArch64/neon-lowhalf128-optimisation.ll   | 91 +++++++++++++++++++
 .../CodeGen/AArch64/peephole-insvigpr.mir     | 51 +++++++++++
 11 files changed, 253 insertions(+), 53 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/neon-lowhalf128-optimisation.ll

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7af6db793892b..d4e4da09ac551 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16222,6 +16222,33 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
     return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
   }
 
+  if (VT.isFixedLengthVector() && VT.getSizeInBits() == 128 && NumElts != 0) {
+    const unsigned HalfElts = NumElts >> 1;
+    auto IsZero = [&](SDValue V) {
+      return isNullConstant(V) || isNullFPConstant(V);
+    };
+    SDValue FirstVal = Op.getOperand(0);
+    if (!isIntOrFPConstant(FirstVal) &&
+        llvm::all_of(llvm::seq<unsigned>(0, NumElts), [&](unsigned I) {
+          SDValue Vi = Op.getOperand(I);
+          return I < HalfElts ? (Vi == FirstVal) : IsZero(Vi);
+        })) {
+      EVT LaneVT = VT.getVectorElementType();
+      EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
+
+      SDValue HiZero = LaneVT.isInteger()
+                           ? DAG.getConstant(0, DL, HalfVT)
+                           : DAG.getConstantFP(0.0, DL, HalfVT);
+
+      SDValue LoHalf =
+          LaneVT.getSizeInBits() == 64
+              ? DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, HalfVT, FirstVal)
+              : DAG.getNode(AArch64ISD::DUP, DL, HalfVT, FirstVal);
+
+      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoHalf, HiZero);
+    }
+  }
+
   if (AllLanesExtractElt) {
     SDNode *Vector = nullptr;
     bool Even = false;
diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index 21ff921da9b8a..398273babe1b1 100644
--- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -687,14 +687,57 @@ bool AArch64MIPeepholeOpt::visitINSviGPR(MachineInstr &MI, unsigned Opc) {
 }
 
 // All instructions that set a FPR64 will implicitly zero the top bits of the
-// register.
+// register. When the def is expressed as a COPY from a GPR, turn it into an
+// explicit FMOV so it cannot be elided later in further passes.
 static bool is64bitDefwithZeroHigh64bit(MachineInstr *MI,
-                                        MachineRegisterInfo *MRI) {
+                                        MachineRegisterInfo *MRI,
+                                        const AArch64InstrInfo *TII) {
   if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
     return false;
   const TargetRegisterClass *RC = MRI->getRegClass(MI->getOperand(0).getReg());
   if (RC != &AArch64::FPR64RegClass)
     return false;
+  if (MI->getOpcode() == TargetOpcode::COPY) {
+    MachineOperand &SrcOp = MI->getOperand(1);
+    if (!SrcOp.isReg())
+      return false;
+    if (SrcOp.getSubReg())
+      return false;
+    Register SrcReg = SrcOp.getReg();
+    auto IsGPR64Like = [&]() -> bool {
+      if (SrcReg.isVirtual())
+        return AArch64::GPR64allRegClass.hasSubClassEq(
+            MRI->getRegClass(SrcReg));
+      return AArch64::GPR64allRegClass.contains(SrcReg);
+    };
+    if (!IsGPR64Like())
+      return false;
+    assert(TII && "Expected InstrInfo when materializing COPYs");
+    // FMOVXDr insists on strict GPR64 operands, so fix up the COPY source.
+    MachineOperand &SrcMO = MI->getOperand(1);
+    bool SrcKill = SrcMO.isKill();
+    if (SrcReg.isVirtual()) {
+      if (MRI->getRegClass(SrcReg) != &AArch64::GPR64RegClass) {
+        // Pass the value through a temporary GPR64 vreg to satisfy the
+        // verifier.
+        Register NewSrc = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
+        BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
+                TII->get(TargetOpcode::COPY), NewSrc)
+            .addReg(SrcReg, getKillRegState(SrcKill));
+        SrcReg = NewSrc;
+        SrcKill = true;
+      }
+    } else if (!AArch64::GPR64RegClass.contains(SrcReg)) {
+      return false;
+    }
+    SrcMO.setReg(SrcReg);
+    SrcMO.setSubReg(0);
+    SrcMO.setIsKill(SrcKill);
+    // Replace the COPY with an explicit FMOV so the zeroing behaviour stays
+    // visible.
+    MI->setDesc(TII->get(AArch64::FMOVXDr));
+    return true;
+  }
   return MI->getOpcode() > TargetOpcode::GENERIC_OP_END;
 }
 
@@ -710,7 +753,7 @@ bool AArch64MIPeepholeOpt::visitINSvi64lane(MachineInstr &MI) {
   if (Low64MI->getOpcode() != AArch64::INSERT_SUBREG)
     return false;
   Low64MI = MRI->getUniqueVRegDef(Low64MI->getOperand(2).getReg());
-  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI))
+  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI, TII))
     return false;
 
   // Check there is `mov 0` MI for high 64-bits.
@@ -751,7 +794,7 @@ bool AArch64MIPeepholeOpt::visitINSvi64lane(MachineInstr &MI) {
 bool AArch64MIPeepholeOpt::visitFMOVDr(MachineInstr &MI) {
   // An FMOVDr sets the high 64-bits to zero implicitly, similar to ORR for GPR.
   MachineInstr *Low64MI = MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
-  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI))
+  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI, TII))
     return false;
 
   // Let's remove MIs for high 64-bits.
diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
index 4ae341c7c5401..de68a79824eb3 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
@@ -543,9 +543,8 @@ define i8 @addv_zero_lanes_negative_v8i8(ptr %arr)  {
 define i8 @addv_zero_lanes_v16i8(ptr %arr)  {
 ; CHECK-SD-LABEL: addv_zero_lanes_v16i8:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-SD-NEXT:    ldrb w8, [x0]
-; CHECK-SD-NEXT:    mov v0.d[0], x8
+; CHECK-SD-NEXT:    fmov d0, x8
 ; CHECK-SD-NEXT:    addv b0, v0.16b
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -568,9 +567,8 @@ define i8 @addv_zero_lanes_v16i8(ptr %arr)  {
 define i16 @addv_zero_lanes_v8i16(ptr %arr)  {
 ; CHECK-SD-LABEL: addv_zero_lanes_v8i16:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-SD-NEXT:    ldrh w8, [x0]
-; CHECK-SD-NEXT:    mov v0.d[0], x8
+; CHECK-SD-NEXT:    fmov d0, x8
 ; CHECK-SD-NEXT:    addv h0, v0.8h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -593,9 +591,8 @@ define i16 @addv_zero_lanes_v8i16(ptr %arr)  {
 define i32 @addv_zero_lanes_v4i32(ptr %arr)  {
 ; CHECK-SD-LABEL: addv_zero_lanes_v4i32:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-SD-NEXT:    ldr w8, [x0]
-; CHECK-SD-NEXT:    mov v0.d[0], x8
+; CHECK-SD-NEXT:    fmov d0, x8
 ; CHECK-SD-NEXT:    addv s0, v0.4s
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index 99a857027e87d..ff2d5c68af531 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -822,15 +822,14 @@ define i64 @red_mla_dup_ext_u8_s8_s64(ptr noalias noundef readonly captures(none
 ; CHECK-SD-NEXT:  // %bb.9: // %vec.epilog.iter.check
 ; CHECK-SD-NEXT:    cbz x11, .LBB6_13
 ; CHECK-SD-NEXT:  .LBB6_10: // %vec.epilog.ph
-; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-SD-NEXT:    mov w11, w1
-; CHECK-SD-NEXT:    movi v1.2d, #0000000000000000
-; CHECK-SD-NEXT:    sxtb x11, w11
+; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-SD-NEXT:    movi v3.2d, #0x000000000000ff
-; CHECK-SD-NEXT:    dup v2.2s, w11
+; CHECK-SD-NEXT:    sxtb x11, w11
+; CHECK-SD-NEXT:    fmov d2, x8
+; CHECK-SD-NEXT:    dup v1.2s, w11
 ; CHECK-SD-NEXT:    mov x11, x10
 ; CHECK-SD-NEXT:    and x10, x9, #0xfffffffc
-; CHECK-SD-NEXT:    mov v0.d[0], x8
 ; CHECK-SD-NEXT:    sub x8, x11, x10
 ; CHECK-SD-NEXT:    add x11, x0, x11
 ; CHECK-SD-NEXT:  .LBB6_11: // %vec.epilog.vector.body
@@ -845,11 +844,11 @@ define i64 @red_mla_dup_ext_u8_s8_s64(ptr noalias noundef readonly captures(none
 ; CHECK-SD-NEXT:    and v4.16b, v4.16b, v3.16b
 ; CHECK-SD-NEXT:    xtn v5.2s, v5.2d
 ; CHECK-SD-NEXT:    xtn v4.2s, v4.2d
-; CHECK-SD-NEXT:    smlal v1.2d, v2.2s, v4.2s
-; CHECK-SD-NEXT:    smlal v0.2d, v2.2s, v5.2s
+; CHECK-SD-NEXT:    smlal v0.2d, v1.2s, v4.2s
+; CHECK-SD-NEXT:    smlal v2.2d, v1.2s, v5.2s
 ; CHECK-SD-NEXT:    b.ne .LBB6_11
 ; CHECK-SD-NEXT:  // %bb.12: // %vec.epilog.middle.block
-; CHECK-SD-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    add v0.2d, v2.2d, v0.2d
 ; CHECK-SD-NEXT:    cmp x10, x9
 ; CHECK-SD-NEXT:    addp d0, v0.2d
 ; CHECK-SD-NEXT:    fmov x8, d0
diff --git a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
index bae254bbd2104..5af21da321d2a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vector-insertion.ll
@@ -318,10 +318,7 @@ define <4 x float> @test_insert_2_f32_undef_zero(float %a) {
 define <2 x double> @test_insert_v2f64_undef_insert1(double %a) {
 ; CHECK-LABEL: test_insert_v2f64_undef_insert1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi.2d v1, #0000000000000000
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov.d v1[0], v0[0]
-; CHECK-NEXT:    mov.16b v0, v1
+; CHECK-NEXT:    fmov d0, d0
 ; CHECK-NEXT:    ret
   %v.0 = insertelement <2 x double > <double undef, double 0.000000e+00>, double %a, i32 0
   ret <2 x double> %v.0
diff --git a/llvm/test/CodeGen/AArch64/bitcast-extend.ll b/llvm/test/CodeGen/AArch64/bitcast-extend.ll
index 741dcf3ad4c2f..b981c1701725a 100644
--- a/llvm/test/CodeGen/AArch64/bitcast-extend.ll
+++ b/llvm/test/CodeGen/AArch64/bitcast-extend.ll
@@ -339,9 +339,8 @@ define <8 x i8> @load_sext_i32_v8i8(ptr %p) {
 define <16 x i8> @load_zext_v16i8(ptr %p) {
 ; CHECK-SD-LABEL: load_zext_v16i8:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-SD-NEXT:    ldr w8, [x0]
-; CHECK-SD-NEXT:    mov v0.d[0], x8
+; CHECK-SD-NEXT:    fmov d0, x8
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: load_zext_v16i8:
diff --git a/llvm/test/CodeGen/AArch64/ctpop.ll b/llvm/test/CodeGen/AArch64/ctpop.ll
index 9c59f1b233b5d..84984c23f129e 100644
--- a/llvm/test/CodeGen/AArch64/ctpop.ll
+++ b/llvm/test/CodeGen/AArch64/ctpop.ll
@@ -599,10 +599,9 @@ entry:
 define i128 @i128_mask(i128 %x) {
 ; CHECK-SD-LABEL: i128_mask:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-SD-NEXT:    and x8, x0, #0xff
 ; CHECK-SD-NEXT:    mov x1, xzr
-; CHECK-SD-NEXT:    mov v0.d[0], x8
+; CHECK-SD-NEXT:    fmov d0, x8
 ; CHECK-SD-NEXT:    cnt v0.16b, v0.16b
 ; CHECK-SD-NEXT:    addv b0, v0.16b
 ; CHECK-SD-NEXT:    fmov x0, d0
diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
index fd7c869fe2f92..137a7feb1a85c 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
@@ -829,9 +829,9 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
-; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    fmov d1, x9
+; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-CVT-SD-NEXT:    fmov d1, x8
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -858,9 +858,9 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
-; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    fmov d1, x9
+; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-FP16-SD-NEXT:    fmov d1, x8
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -1296,9 +1296,9 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
-; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    fmov d1, x9
+; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-CVT-SD-NEXT:    fmov d1, x8
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -1326,9 +1326,9 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
-; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    fmov d1, x9
+; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-FP16-SD-NEXT:    fmov d1, x8
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -1748,9 +1748,9 @@ define <2 x i64> @utest_f16i64(<2 x half> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
-; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    fmov d1, x9
+; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-CVT-SD-NEXT:    fmov d1, x8
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -1778,9 +1778,9 @@ define <2 x i64> @utest_f16i64(<2 x half> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
-; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    fmov d1, x9
+; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-FP16-SD-NEXT:    fmov d1, x8
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -2774,9 +2774,9 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
-; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    fmov d1, x9
+; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-CVT-SD-NEXT:    fmov d1, x8
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -2803,9 +2803,9 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
-; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    fmov d1, x9
+; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-FP16-SD-NEXT:    fmov d1, x8
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -3232,9 +3232,9 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
-; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    fmov d1, x9
+; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-CVT-SD-NEXT:    fmov d1, x8
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -3262,9 +3262,9 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
-; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    fmov d1, x9
+; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-FP16-SD-NEXT:    fmov d1, x8
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -3675,9 +3675,9 @@ define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
-; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    fmov d1, x9
+; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-CVT-SD-NEXT:    fmov d1, x8
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -3705,9 +3705,9 @@ define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
-; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    fmov d1, x9
+; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
+; CHECK-FP16-SD-NEXT:    fmov d1, x8
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
diff --git a/llvm/test/CodeGen/AArch64/implicitly-set-zero-high-64-bits.ll b/llvm/test/CodeGen/AArch64/implicitly-set-zero-high-64-bits.ll
index adde5429a6d93..c7e22b2f4301d 100644
--- a/llvm/test/CodeGen/AArch64/implicitly-set-zero-high-64-bits.ll
+++ b/llvm/test/CodeGen/AArch64/implicitly-set-zero-high-64-bits.ll
@@ -95,10 +95,7 @@ entry:
 define <2 x double> @fadd(double noundef %x, double noundef %y) {
 ; CHECK-LABEL: fadd:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0000000000000000
 ; CHECK-NEXT:    fadd d0, d0, d1
-; CHECK-NEXT:    mov v2.d[0], v0.d[0]
-; CHECK-NEXT:    mov v0.16b, v2.16b
 ; CHECK-NEXT:    ret
 entry:
   %add = fadd double %x, %y
diff --git a/llvm/test/CodeGen/AArch64/neon-lowhalf128-optimisation.ll b/llvm/test/CodeGen/AArch64/neon-lowhalf128-optimisation.ll
new file mode 100644
index 0000000000000..38be2992c8211
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/neon-lowhalf128-optimisation.ll
@@ -0,0 +1,91 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s
+
+define <2 x i64> @low_vector_splat_v2i64_from_i64(i64 %0){
+; CHECK-LABEL: low_vector_splat_v2i64_from_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    ret
+  %2 = insertelement <1 x i64> poison, i64 %0, i64 0
+  %3 = shufflevector <1 x i64> %2, <1 x i64> zeroinitializer, <2 x i32> <i32 0, i32 1>
+  ret <2 x i64> %3
+}
+
+define <4 x i32> @low_vector_splat_v4i32_from_i32(i32 %0) {
+; CHECK-LABEL: low_vector_splat_v4i32_from_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.2s, w0
+; CHECK-NEXT:    ret
+  %2 = insertelement <2 x i32> poison, i32 %0, i64 0
+  %3 = shufflevector <2 x i32> %2, <2 x i32> poison, <2 x i32> zeroinitializer
+  %4 = shufflevector <2 x i32> %3, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %4
+}
+
+define <8 x i16> @low_vector_splat_v8i16_from_i16(i16 %0) {
+; CHECK-LABEL: low_vector_splat_v8i16_from_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.4h, w0
+; CHECK-NEXT:    ret
+  %2 = insertelement <4 x i16> poison, i16 %0, i64 0
+  %3 = shufflevector <4 x i16> %2, <4 x i16> poison, <4 x i32> zeroinitializer
+  %4 = shufflevector <4 x i16> %3, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %4
+}
+
+define <16 x i8> @low_vector_splat_v16i8_from_i8(i8 %0) {
+; CHECK-LABEL: low_vector_splat_v16i8_from_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup v0.8b, w0
+; CHECK-NEXT:    ret
+  %2 = insertelement <8 x i8> poison, i8 %0, i64 0
+  %3 = shufflevector <8 x i8> %2, <8 x i8> poison, <8 x i32> zeroinitializer
+  %4 = shufflevector <8 x i8> %3, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %4
+}
+
+define <2 x double> @low_vector_splat_v2f64_from_f64(double %0) {
+; CHECK-LABEL: low_vector_splat_v2f64_from_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, d0
+; CHECK-NEXT:    ret
+  %2 = insertelement <1 x double> poison, double %0, i64 0
+  %3 = shufflevector <1 x double> %2, <1 x double> zeroinitializer, <2 x i32> <i32 0, i32 1>
+  ret <2 x double> %3
+}
+
+define <4 x float> @low_vector_splat_v4f32_from_f32(float %0) {
+; CHECK-LABEL: low_vector_splat_v4f32_from_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
+; CHECK-NEXT:    dup v0.2s, v0.s[0]
+; CHECK-NEXT:    ret
+  %2 = insertelement <2 x float> poison, float %0, i64 0
+  %3 = shufflevector <2 x float> %2, <2 x float> poison, <2 x i32> zeroinitializer
+  %4 = shufflevector <2 x float> %3, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x float> %4
+}
+
+define <8 x half> @low_vector_splat_v8f16_from_f16(half %0) {
+; CHECK-LABEL: low_vector_splat_v8f16_from_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $h0 killed $h0 def $q0
+; CHECK-NEXT:    dup v0.4h, v0.h[0]
+; CHECK-NEXT:    ret
+  %2 = insertelement <4 x half> poison, half %0, i64 0
+  %3 = shufflevector <4 x half> %2, <4 x half> poison, <4 x i32> zeroinitializer
+  %4 = shufflevector <4 x half> %3, <4 x half> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x half> %4
+}
+
+define <8 x bfloat> @low_vector_splat_v8bf16_from_bf16(bfloat %0) {
+; CHECK-LABEL: low_vector_splat_v8bf16_from_bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $h0 killed $h0 def $q0
+; CHECK-NEXT:    dup v0.4h, v0.h[0]
+; CHECK-NEXT:    ret
+  %2 = insertelement <4 x bfloat> poison, bfloat %0, i64 0
+  %3 = shufflevector <4 x bfloat> %2, <4 x bfloat> poison, <4 x i32> zeroinitializer
+  %4 = shufflevector <4 x bfloat> %3, <4 x bfloat> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x bfloat> %4
+}
diff --git a/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir b/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
index aef01e42ed7cc..a68eda11d5ca1 100644
--- a/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
+++ b/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
@@ -41,6 +41,11 @@
     ret void
   }
 
+  define void @insert_vec_from_gpr64_zero_high(i64 %v, ptr %dst) {
+  entry:
+    ret void
+  }
+
   attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
 
 ...
@@ -521,4 +526,50 @@ body:             |
     STRSui killed %16, %0, 0 :: (store (s32) into %ir.hist)
     RET_ReallyLR
 
+---
+name:            insert_vec_from_gpr64_zero_high
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gpr64common, preferred-register: '' }
+  - { id: 1, class: gpr64common, preferred-register: '' }
+  - { id: 2, class: fpr64, preferred-register: '' }
+  - { id: 3, class: fpr128, preferred-register: '' }
+  - { id: 4, class: fpr128, preferred-register: '' }
+  - { id: 5, class: fpr64, preferred-register: '' }
+  - { id: 6, class: fpr128, preferred-register: '' }
+  - { id: 7, class: fpr128, preferred-register: '' }
+  - { id: 8, class: fpr128, preferred-register: '' }
+liveins:
+  - { reg: '$x0', virtual-reg: '%0' }
+  - { reg: '$x1', virtual-reg: '%1' }
+body:             |
+  bb.0.entry:
+    liveins: $x0, $x1
+
+    ; CHECK-LABEL: name: insert_vec_from_gpr64_zero_high
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[PTR:%[0-9]+]]:gpr64common = COPY $x0
+    ; CHECK-NEXT: [[VAL:%[0-9]+]]:gpr64common = COPY $x1
+    ; CHECK-NEXT: [[GPR:%[0-9]+]]:gpr64 = COPY [[VAL]]
+    ; CHECK-NEXT: [[FMOV:%[0-9]+]]:fpr64 = FMOVXDr killed [[GPR]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_LOW:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[FMOV]], %subreg.dsub
+    ; CHECK-NEXT: [[MOVID:%[0-9]+]]:fpr64 = MOVID 0
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_ZERO:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], killed [[MOVID]], %subreg.dsub
+    ; CHECK-NEXT: STRQui killed [[INSERT_LOW]], [[PTR]], 0 :: (store (s128) into %ir.dst, align 8)
+    ; CHECK-NEXT: RET_ReallyLR
+    %0:gpr64common = COPY $x0
+    %1:gpr64common = COPY $x1
+    %2:fpr64 = COPY %1
+    %4:fpr128 = IMPLICIT_DEF
+    %3:fpr128 = INSERT_SUBREG %4, %2, %subreg.dsub
+    %5:fpr64 = MOVID 0
+    %7:fpr128 = IMPLICIT_DEF
+    %6:fpr128 = INSERT_SUBREG %7, killed %5, %subreg.dsub
+    %8:fpr128 = INSvi64lane %3, 1, killed %6, 0
+    STRQui killed %8, %0, 0 :: (store (s128) into %ir.dst, align 8)
+    RET_ReallyLR
+
 ...

>From 84f312cf95a0442f299aee964713b75d2badaf91 Mon Sep 17 00:00:00 2001
From: Amichaxx <amina.chabane at arm.com>
Date: Tue, 27 Jan 2026 15:13:36 +0000
Subject: [PATCH 2/7] Clang format

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d4e4da09ac551..630cb8017b662 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16236,9 +16236,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
       EVT LaneVT = VT.getVectorElementType();
       EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
 
-      SDValue HiZero = LaneVT.isInteger()
-                           ? DAG.getConstant(0, DL, HalfVT)
-                           : DAG.getConstantFP(0.0, DL, HalfVT);
+      SDValue HiZero = LaneVT.isInteger() ? DAG.getConstant(0, DL, HalfVT)
+                                          : DAG.getConstantFP(0.0, DL, HalfVT);
 
       SDValue LoHalf =
           LaneVT.getSizeInBits() == 64

>From 0db67fc785ab6adbc8f39983b6bc2f125db65c2d Mon Sep 17 00:00:00 2001
From: Amichaxx <amina.chabane at arm.com>
Date: Wed, 4 Feb 2026 14:57:03 +0000
Subject: [PATCH 3/7] removed all_of loop upon @ilinpv's suggestion

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 49 +++++++++++--------
 1 file changed, 28 insertions(+), 21 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 630cb8017b662..2cb0417cbd759 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16160,8 +16160,24 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
   SmallMapVector<SDValue, unsigned, 16> DifferentValueMap;
   unsigned ConsecutiveValCount = 0;
   SDValue PrevVal;
+  auto IsZero = [&](SDValue V) {
+    return isNullConstant(V) || isNullFPConstant(V);
+  };
+  bool MaybeLowHalfZeroHigh =
+      VT.isFixedLengthVector() && VT.getSizeInBits() == 128 && NumElts != 0;
+  unsigned HalfElts = MaybeLowHalfZeroHigh ? (NumElts >> 1) : 0;
+  SDValue LowHalfFirstVal =
+      MaybeLowHalfZeroHigh ? Op.getOperand(0) : SDValue();
   for (unsigned i = 0; i < NumElts; ++i) {
     SDValue V = Op.getOperand(i);
+    if (MaybeLowHalfZeroHigh) {
+      if (i < HalfElts) {
+        if (V != LowHalfFirstVal)
+          MaybeLowHalfZeroHigh = false;
+      } else if (!IsZero(V)) {
+        MaybeLowHalfZeroHigh = false;
+      }
+    }
     if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
       AllLanesExtractElt = false;
     if (V.isUndef()) {
@@ -16222,30 +16238,21 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
     return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
   }
 
-  if (VT.isFixedLengthVector() && VT.getSizeInBits() == 128 && NumElts != 0) {
-    const unsigned HalfElts = NumElts >> 1;
-    auto IsZero = [&](SDValue V) {
-      return isNullConstant(V) || isNullFPConstant(V);
-    };
-    SDValue FirstVal = Op.getOperand(0);
-    if (!isIntOrFPConstant(FirstVal) &&
-        llvm::all_of(llvm::seq<unsigned>(0, NumElts), [&](unsigned I) {
-          SDValue Vi = Op.getOperand(I);
-          return I < HalfElts ? (Vi == FirstVal) : IsZero(Vi);
-        })) {
-      EVT LaneVT = VT.getVectorElementType();
-      EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
+  if (MaybeLowHalfZeroHigh && LowHalfFirstVal.getNode() &&
+      !LowHalfFirstVal.isUndef() &&
+      !isIntOrFPConstant(LowHalfFirstVal)) {
+    EVT LaneVT = VT.getVectorElementType();
+    EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
 
-      SDValue HiZero = LaneVT.isInteger() ? DAG.getConstant(0, DL, HalfVT)
-                                          : DAG.getConstantFP(0.0, DL, HalfVT);
+    SDValue HiZero = LaneVT.isInteger() ? DAG.getConstant(0, DL, HalfVT)
+                                        : DAG.getConstantFP(0.0, DL, HalfVT);
 
-      SDValue LoHalf =
-          LaneVT.getSizeInBits() == 64
-              ? DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, HalfVT, FirstVal)
-              : DAG.getNode(AArch64ISD::DUP, DL, HalfVT, FirstVal);
+    SDValue LoHalf =
+        LaneVT.getSizeInBits() == 64
+            ? DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, HalfVT, LowHalfFirstVal)
+            : DAG.getNode(AArch64ISD::DUP, DL, HalfVT, LowHalfFirstVal);
 
-      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoHalf, HiZero);
-    }
+    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoHalf, HiZero);
   }
 
   if (AllLanesExtractElt) {

>From b0b8c4e2d74a3a2e55db55864697e48c62d7f799 Mon Sep 17 00:00:00 2001
From: Amichaxx <amina.chabane at arm.com>
Date: Wed, 4 Feb 2026 14:59:32 +0000
Subject: [PATCH 4/7] Clang format

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2cb0417cbd759..c3e7b43e09b3e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16166,8 +16166,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
   bool MaybeLowHalfZeroHigh =
       VT.isFixedLengthVector() && VT.getSizeInBits() == 128 && NumElts != 0;
   unsigned HalfElts = MaybeLowHalfZeroHigh ? (NumElts >> 1) : 0;
-  SDValue LowHalfFirstVal =
-      MaybeLowHalfZeroHigh ? Op.getOperand(0) : SDValue();
+  SDValue LowHalfFirstVal = MaybeLowHalfZeroHigh ? Op.getOperand(0) : SDValue();
   for (unsigned i = 0; i < NumElts; ++i) {
     SDValue V = Op.getOperand(i);
     if (MaybeLowHalfZeroHigh) {
@@ -16239,8 +16238,7 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
   }
 
   if (MaybeLowHalfZeroHigh && LowHalfFirstVal.getNode() &&
-      !LowHalfFirstVal.isUndef() &&
-      !isIntOrFPConstant(LowHalfFirstVal)) {
+      !LowHalfFirstVal.isUndef() && !isIntOrFPConstant(LowHalfFirstVal)) {
     EVT LaneVT = VT.getVectorElementType();
     EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
 

>From 299e1848ed3c6863a6ef3de61db0dcd8a7e3b845 Mon Sep 17 00:00:00 2001
From: Amichaxx <amina.chabane at arm.com>
Date: Mon, 9 Feb 2026 17:20:41 +0000
Subject: [PATCH 5/7] Added reduced reproduction

---
 ...arch64-neonvector-tensorflow-regression.ll | 285 ++++++++++++++++++
 1 file changed, 285 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/aarch64-neonvector-tensorflow-regression.ll

diff --git a/llvm/test/CodeGen/AArch64/aarch64-neonvector-tensorflow-regression.ll b/llvm/test/CodeGen/AArch64/aarch64-neonvector-tensorflow-regression.ll
new file mode 100644
index 0000000000000..d5a12218b2402
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-neonvector-tensorflow-regression.ll
@@ -0,0 +1,285 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -start-before=aarch64-isel %s -o /dev/null
+; Regression test for AArch64 compile-time regression, referring to PR #166962.
+source_filename = "third_party/tensorflow/core/kernels/image/resize_bicubic_op.cc"
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "aarch64-grtev4-linux-gnu"
+
+%"struct.std::__u::array" = type { [4 x i64] }
+%"class.tensorflow::(anonymous namespace)::CachedInterpolationCalculator" = type { [4 x i64] }
+%"struct.tensorflow::InitOnStartupMarker" = type { i8 }
+
+declare void @llvm.lifetime.start.p0(ptr captures(none))
+
+declare void @llvm.lifetime.end.p0(ptr captures(none))
+
+define fastcc void @_ZN10tensorflow12_GLOBAL__N_125ComputeXWeightsAndIndicesERKNS_17ImageResizerStateEbPNSt3__u6vectorINS0_17WeightsAndIndicesENS4_9allocatorIS6_EEEE(ptr noundef nonnull readonly captures(none) %x_wais) {
+entry:
+  %new_x_indices.i116 = alloca %"struct.std::__u::array", align 8
+  %new_x_indices.i = alloca %"struct.std::__u::array", align 8
+  %calc = alloca %"class.tensorflow::(anonymous namespace)::CachedInterpolationCalculator", align 8
+  call void @llvm.lifetime.start.p0(ptr nonnull %calc)
+  call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) %calc, i8 -1, i64 32, i1 false)
+  %0 = load i64, ptr null, align 8
+  br i1 false, label %for.cond.preheader, label %for.cond4.preheader
+
+for.cond4.preheader:                              ; preds = %entry
+  %1 = icmp sgt i64 %0, 0
+  br label %for.body8
+
+for.cond.preheader:                               ; preds = %entry
+  %2 = icmp sgt i64 %0, 0
+  %sunkaddr = getelementptr i8, ptr %x_wais, i64 8
+  %x_wais.val102 = load i64, ptr %sunkaddr, align 8
+  %cmp.i = icmp ult i64 0, %x_wais.val102
+  %x_wais.val101 = load ptr, ptr %x_wais, align 8
+  %3 = load i64, ptr null, align 8
+  %4 = load float, ptr null, align 4
+  %scevgep230 = getelementptr i8, ptr %x_wais.val101, i64 0
+  tail call fastcc void null(float noundef %4, i64 noundef 0, i64 noundef %3, ptr noundef nonnull %scevgep230)
+  %sunkaddr256 = getelementptr i8, ptr %x_wais, i64 8
+  %x_wais.val100 = load i64, ptr %sunkaddr256, align 8
+  %cmp.i103 = icmp ult i64 0, %x_wais.val100
+  %x_wais.val99 = load ptr, ptr %x_wais, align 8
+  %scevgep228 = getelementptr i8, ptr %x_wais.val99, i64 0
+  %scevgep229 = getelementptr i8, ptr %scevgep228, i64 16
+  %5 = load i64, ptr %scevgep229, align 8
+  %scevgep226 = getelementptr i8, ptr %x_wais.val99, i64 0
+  %scevgep227 = getelementptr i8, ptr %scevgep226, i64 24
+  %6 = load i64, ptr %scevgep227, align 8
+  %scevgep224 = getelementptr i8, ptr %x_wais.val99, i64 0
+  %scevgep225 = getelementptr i8, ptr %scevgep224, i64 32
+  %7 = load i64, ptr %scevgep225, align 8
+  %scevgep222 = getelementptr i8, ptr %x_wais.val99, i64 0
+  %scevgep223 = getelementptr i8, ptr %scevgep222, i64 40
+  %8 = load i64, ptr %scevgep223, align 8
+  call void @llvm.lifetime.start.p0(ptr nonnull %new_x_indices.i)
+  store i64 %5, ptr %new_x_indices.i, align 8
+  %sunkaddr257 = getelementptr inbounds i8, ptr %new_x_indices.i, i64 8
+  store i64 %6, ptr %sunkaddr257, align 8
+  %sunkaddr258 = getelementptr inbounds i8, ptr %new_x_indices.i, i64 16
+  store i64 %7, ptr %sunkaddr258, align 8
+  %sunkaddr259 = getelementptr inbounds i8, ptr %new_x_indices.i, i64 24
+  store i64 %8, ptr %sunkaddr259, align 8
+  %9 = load i64, ptr %calc, align 8
+  %cmp4.not.i = icmp eq i64 %9, %5
+  %sunkaddr260 = getelementptr inbounds i8, ptr %calc, i64 8
+  %10 = load i64, ptr %sunkaddr260, align 8
+  %cmp4.152.i = icmp eq i64 %10, %5
+  store i64 %5, ptr %calc, align 8
+  br label %if.end.1.i
+
+if.end.1.i:                                       ; preds = %for.cond.preheader
+  %new_indices_hand.15361.i = phi i64 [ 1, %for.cond.preheader ]
+  %sunkaddr261 = getelementptr inbounds i8, ptr %calc, i64 16
+  %11 = load i64, ptr %sunkaddr261, align 8
+  %arrayidx.i.2.i = getelementptr inbounds nuw i64, ptr %new_x_indices.i, i64 %new_indices_hand.15361.i
+  %12 = load i64, ptr %arrayidx.i.2.i, align 8
+  %cmp4.2.i = icmp eq i64 %11, %12
+  %cmp5.2.i = icmp samesign ult i64 %new_indices_hand.15361.i, 2
+  %arrayidx12.2.i = getelementptr inbounds nuw i64, ptr %calc, i64 %new_indices_hand.15361.i
+  store i64 %11, ptr %arrayidx12.2.i, align 8
+  %inc13.2.i = add nuw nsw i64 %new_indices_hand.15361.i, 1
+  %arrayidx.i.3.i.phi.trans.insert = getelementptr inbounds nuw i64, ptr %new_x_indices.i, i64 %inc13.2.i
+  %.pre189 = load i64, ptr %arrayidx.i.3.i.phi.trans.insert, align 8
+  %sunkaddr262 = getelementptr inbounds i8, ptr %calc, i64 24
+  %13 = load i64, ptr %sunkaddr262, align 8
+  %cmp4.3.i = icmp eq i64 %13, %.pre189
+  %cmp5.3.i = icmp samesign ult i64 %inc13.2.i, 3
+  %arrayidx12.3.i = getelementptr inbounds nuw i64, ptr %calc, i64 %inc13.2.i
+  store i64 %.pre189, ptr %arrayidx12.3.i, align 8
+  %inc13.3.i = add nuw nsw i64 %inc13.2.i, 1
+  %cond = icmp eq i64 %inc13.3.i, 2
+  call void @llvm.lifetime.end.p0(ptr nonnull %new_x_indices.i)
+  %scevgep220 = getelementptr i8, ptr %x_wais.val99, i64 0
+  %scevgep221 = getelementptr i8, ptr %scevgep220, i64 48
+  %14 = trunc i64 %inc13.3.i to i32
+  store i32 %14, ptr %scevgep221, align 8
+  %inc = add nuw nsw i64 0, 1
+  %15 = load i64, ptr null, align 8
+  %lsr.iv.next219 = add nuw i64 0, 56
+  %cmp = icmp slt i64 %inc, %15
+  %cmp26184 = icmp sgt i64 %15, 0
+  br label %for.body28
+
+for.body8:                                        ; preds = %for.body8, %for.cond4.preheader
+  %lsr.iv231 = phi i64 [ 48, %for.cond4.preheader ], [ %lsr.iv.next232, %for.body8 ]
+  %x3.0181 = phi i64 [ 0, %for.cond4.preheader ], [ %inc21, %for.body8 ]
+  %16 = load i64, ptr null, align 8
+  %sunkaddr268 = getelementptr i8, ptr %x_wais, i64 8
+  %x_wais.val98 = load i64, ptr %sunkaddr268, align 8
+  %cmp.i107 = icmp ult i64 %x3.0181, %x_wais.val98
+  %x_wais.val97 = load ptr, ptr %x_wais, align 8
+  %17 = load float, ptr null, align 4
+  %scevgep252 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
+  %tmp = trunc i64 %x3.0181 to i32
+  %conv.i.i = sitofp i32 %tmp to float
+  %mul.i.i = fmul float %17, %conv.i.i
+  %18 = tail call noundef float @llvm.floor.f32(float %mul.i.i)
+  %conv2.i = fptosi float %18 to i64
+  %conv3.i = sitofp i64 %conv2.i to float
+  %sub.i = fsub float %mul.i.i, %conv3.i
+  %mul.i = fmul float %sub.i, 1.024000e+03
+  %call4.i = tail call i64 null(float noundef %mul.i)
+  %19 = load atomic i8, ptr getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 18) acquire, align 8
+  %20 = zext i8 %19 to i32
+  %21 = and i32 %20, 1
+  %guard.uninitialized1.i.i = icmp eq i32 %21, 0
+  %22 = tail call i32 null(ptr nonnull getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 18))
+  %tobool3.not.i.i = icmp eq i32 %22, 0
+  %call5.i.i = tail call fastcc noundef ptr null(double noundef -7.500000e-01)
+  store ptr %call5.i.i, ptr getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 17), align 8
+  tail call void null(ptr nonnull getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 18))
+  %retval.0.i.i = load ptr, ptr getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 17), align 8
+  %mul6.i = shl nsw i64 %call4.i, 1
+  %23 = getelementptr float, ptr %retval.0.i.i, i64 %mul6.i
+  %arrayidx.i111 = getelementptr i8, ptr %23, i64 4
+  %24 = load float, ptr %arrayidx.i111, align 4
+  %sunkaddr270 = getelementptr i8, ptr %scevgep252, i64 -48
+  store float %24, ptr %sunkaddr270, align 8
+  %25 = load float, ptr %23, align 4
+  %scevgep250 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
+  %scevgep251 = getelementptr i8, ptr %scevgep250, i64 -44
+  store float %25, ptr %scevgep251, align 4
+  %mul10.i = sub i64 2048, %mul6.i
+  %arrayidx11.i = getelementptr inbounds float, ptr %retval.0.i.i, i64 %mul10.i
+  %26 = load float, ptr %arrayidx11.i, align 4
+  %scevgep248 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
+  %scevgep249 = getelementptr i8, ptr %scevgep248, i64 -40
+  store float %26, ptr %scevgep249, align 8
+  %add14.i = sub i64 2049, %mul6.i
+  %arrayidx15.i = getelementptr inbounds float, ptr %retval.0.i.i, i64 %add14.i
+  %27 = load float, ptr %arrayidx15.i, align 4
+  %scevgep246 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
+  %scevgep247 = getelementptr i8, ptr %scevgep246, i64 -36
+  store float %27, ptr %scevgep247, align 4
+  %sub.i.i = add nsw i64 %16, -1
+  %28 = insertelement <2 x i64> poison, i64 %conv2.i, i64 0
+  %29 = shufflevector <2 x i64> %28, <2 x i64> poison, <2 x i32> zeroinitializer
+  %30 = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %29, <2 x i64> <i64 1, i64 0>)
+  %scevgep244 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
+  %scevgep245 = getelementptr i8, ptr %scevgep244, i64 -32
+  %31 = add nsw <2 x i64> %30, <i64 -1, i64 0>
+  %32 = insertelement <2 x i64> poison, i64 %sub.i.i, i64 0
+  %33 = shufflevector <2 x i64> %32, <2 x i64> poison, <2 x i32> zeroinitializer
+  %34 = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %31, <2 x i64> %33)
+  store <2 x i64> %34, ptr %scevgep245, align 8
+  %35 = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %29, <2 x i64> <i64 -1, i64 -2>)
+  %scevgep242 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
+  %scevgep243 = getelementptr i8, ptr %scevgep242, i64 -16
+  %36 = add nsw <2 x i64> %35, <i64 1, i64 2>
+  %37 = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %36, <2 x i64> %33)
+  store <2 x i64> %37, ptr %scevgep243, align 8
+  %sunkaddr271 = getelementptr i8, ptr %x_wais, i64 8
+  %x_wais.val96 = load i64, ptr %sunkaddr271, align 8
+  %cmp.i112 = icmp ult i64 %x3.0181, %x_wais.val96
+  %x_wais.val95 = load ptr, ptr %x_wais, align 8
+  %scevgep240 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
+  %scevgep241 = getelementptr i8, ptr %scevgep240, i64 -32
+  %38 = load i64, ptr %scevgep241, align 8
+  %scevgep238 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
+  %scevgep239 = getelementptr i8, ptr %scevgep238, i64 -24
+  %39 = load i64, ptr %scevgep239, align 8
+  %scevgep236 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
+  %scevgep237 = getelementptr i8, ptr %scevgep236, i64 -16
+  %40 = load i64, ptr %scevgep237, align 8
+  %scevgep234 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
+  %scevgep235 = getelementptr i8, ptr %scevgep234, i64 -8
+  %41 = load i64, ptr %scevgep235, align 8
+  call void @llvm.lifetime.start.p0(ptr nonnull %new_x_indices.i116)
+  store i64 %38, ptr %new_x_indices.i116, align 8
+  %sunkaddr272 = getelementptr inbounds i8, ptr %new_x_indices.i116, i64 8
+  store i64 %39, ptr %sunkaddr272, align 8
+  %sunkaddr273 = getelementptr inbounds i8, ptr %new_x_indices.i116, i64 16
+  store i64 %40, ptr %sunkaddr273, align 8
+  %sunkaddr274 = getelementptr inbounds i8, ptr %new_x_indices.i116, i64 24
+  store i64 %41, ptr %sunkaddr274, align 8
+  %42 = load i64, ptr %calc, align 8
+  %cmp4.not.i120 = icmp eq i64 %42, %38
+  %sunkaddr275 = getelementptr inbounds i8, ptr %calc, i64 8
+  %43 = load i64, ptr %sunkaddr275, align 8
+  %cmp4.1.i161 = icmp eq i64 %43, %39
+  %sunkaddr276 = getelementptr inbounds i8, ptr %calc, i64 16
+  %44 = load i64, ptr %sunkaddr276, align 8
+  %arrayidx.i.2.i128 = getelementptr inbounds nuw i64, ptr %new_x_indices.i116, i64 2
+  %45 = load i64, ptr %arrayidx.i.2.i128, align 8
+  %cmp4.2.i129 = icmp eq i64 %44, %45
+  %cmp5.2.i149 = icmp samesign ult i64 2, 2
+  %arrayidx12.2.i154 = getelementptr inbounds nuw i64, ptr %calc, i64 2
+  store i64 %44, ptr %arrayidx12.2.i154, align 8
+  %inc13.2.i151 = add nuw nsw i64 2, 1
+  %arrayidx.i.3.i134.phi.trans.insert = getelementptr inbounds nuw i64, ptr %new_x_indices.i116, i64 %inc13.2.i151
+  %.pre = load i64, ptr %arrayidx.i.3.i134.phi.trans.insert, align 8
+  %sunkaddr277 = getelementptr inbounds i8, ptr %calc, i64 24
+  %46 = load i64, ptr %sunkaddr277, align 8
+  %cmp4.3.i135 = icmp eq i64 %46, %.pre
+  %cmp5.3.i143 = icmp samesign ult i64 %inc13.2.i151, 3
+  %arrayidx12.3.i147 = getelementptr inbounds nuw i64, ptr %calc, i64 %inc13.2.i151
+  store i64 %.pre, ptr %arrayidx12.3.i147, align 8
+  %inc13.3.i145 = add nuw nsw i64 %inc13.2.i151, 1
+  call void @llvm.lifetime.end.p0(ptr nonnull %new_x_indices.i116)
+  %scevgep233 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
+  %47 = trunc i64 %inc13.3.i145 to i32
+  store i32 %47, ptr %scevgep233, align 8
+  %inc21 = add nuw nsw i64 %x3.0181, 1
+  %48 = load i64, ptr null, align 8
+  %lsr.iv.next232 = add i64 %lsr.iv231, 56
+  %cmp6 = icmp slt i64 %inc21, %48
+  br label %for.body8
+
+for.body28:                                       ; preds = %for.body28, %if.end.1.i
+  %lsr.iv = phi i64 [ 0, %if.end.1.i ], [ %lsr.iv.next, %for.body28 ]
+  %indvars.iv = phi i64 [ 0, %if.end.1.i ], [ %indvars.iv.next, %for.body28 ]
+  %sunkaddr282 = getelementptr i8, ptr %x_wais, i64 8
+  %x_wais.val94 = load i64, ptr %sunkaddr282, align 8
+  %cmp.i163 = icmp ugt i64 %x_wais.val94, %indvars.iv
+  %x_wais.val93 = load ptr, ptr %x_wais, align 8
+  %49 = load i64, ptr null, align 8
+  %scevgep216 = getelementptr i8, ptr %x_wais.val93, i64 %lsr.iv
+  %scevgep217 = getelementptr i8, ptr %scevgep216, i64 16
+  %50 = load i64, ptr %scevgep217, align 8
+  %mul = mul nsw i64 %50, %49
+  store i64 %mul, ptr %scevgep217, align 8
+  %sunkaddr284 = getelementptr i8, ptr %x_wais, i64 8
+  %x_wais.val92 = load i64, ptr %sunkaddr284, align 8
+  %cmp.i167 = icmp ugt i64 %x_wais.val92, %indvars.iv
+  %x_wais.val91 = load ptr, ptr %x_wais, align 8
+  %51 = load i64, ptr null, align 8
+  %scevgep214 = getelementptr i8, ptr %x_wais.val91, i64 %lsr.iv
+  %scevgep215 = getelementptr i8, ptr %scevgep214, i64 24
+  %52 = load i64, ptr %scevgep215, align 8
+  %mul36 = mul nsw i64 %52, %51
+  store i64 %mul36, ptr %scevgep215, align 8
+  %sunkaddr286 = getelementptr i8, ptr %x_wais, i64 8
+  %x_wais.val90 = load i64, ptr %sunkaddr286, align 8
+  %cmp.i171 = icmp ugt i64 %x_wais.val90, %indvars.iv
+  %x_wais.val89 = load ptr, ptr %x_wais, align 8
+  %53 = load i64, ptr null, align 8
+  %scevgep212 = getelementptr i8, ptr %x_wais.val89, i64 %lsr.iv
+  %scevgep213 = getelementptr i8, ptr %scevgep212, i64 32
+  %54 = load i64, ptr %scevgep213, align 8
+  %mul41 = mul nsw i64 %54, %53
+  store i64 %mul41, ptr %scevgep213, align 8
+  %sunkaddr288 = getelementptr i8, ptr %x_wais, i64 8
+  %x_wais.val88 = load i64, ptr %sunkaddr288, align 8
+  %cmp.i175 = icmp ugt i64 %x_wais.val88, %indvars.iv
+  %x_wais.val = load ptr, ptr %x_wais, align 8
+  %55 = load i64, ptr null, align 8
+  %scevgep = getelementptr i8, ptr %x_wais.val, i64 %lsr.iv
+  %scevgep211 = getelementptr i8, ptr %scevgep, i64 40
+  %56 = load i64, ptr %scevgep211, align 8
+  %mul46 = mul nsw i64 %56, %55
+  store i64 %mul46, ptr %scevgep211, align 8
+  %indvars.iv.next = add nuw i64 %indvars.iv, 1
+  %57 = load i64, ptr null, align 8
+  %lsr.iv.next = add nuw i64 %lsr.iv, 56
+  %cmp26 = icmp sgt i64 %57, %indvars.iv.next
+  br label %for.body28
+}
+
+declare void @llvm.memset.p0.i64(ptr writeonly captures(none), i8, i64, i1 immarg)
+
+declare float @llvm.floor.f32(float)
+
+declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
+
+declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)

>From 7ed54f14d5d1698144725242462df790aa94eb82 Mon Sep 17 00:00:00 2001
From: Amichaxx <amina.chabane at arm.com>
Date: Thu, 19 Feb 2026 12:49:14 +0000
Subject: [PATCH 6/7] Respond to review comments on AArch64ISelLowering.cpp;
 remove peephole

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 19 +++----
 .../Target/AArch64/AArch64MIPeepholeOpt.cpp   | 51 ++-----------------
 2 files changed, 14 insertions(+), 56 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c3e7b43e09b3e..bf565b3862b70 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16164,23 +16164,16 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
     return isNullConstant(V) || isNullFPConstant(V);
   };
   bool MaybeLowHalfZeroHigh =
-      VT.isFixedLengthVector() && VT.getSizeInBits() == 128 && NumElts != 0;
+      VT.isFixedLengthVector() && VT.getSizeInBits() == 128;
   unsigned HalfElts = MaybeLowHalfZeroHigh ? (NumElts >> 1) : 0;
   SDValue LowHalfFirstVal = MaybeLowHalfZeroHigh ? Op.getOperand(0) : SDValue();
   for (unsigned i = 0; i < NumElts; ++i) {
     SDValue V = Op.getOperand(i);
-    if (MaybeLowHalfZeroHigh) {
-      if (i < HalfElts) {
-        if (V != LowHalfFirstVal)
-          MaybeLowHalfZeroHigh = false;
-      } else if (!IsZero(V)) {
-        MaybeLowHalfZeroHigh = false;
-      }
-    }
     if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
       AllLanesExtractElt = false;
     if (V.isUndef()) {
       ++NumUndefLanes;
+      MaybeLowHalfZeroHigh = false;
       continue;
     }
     if (i > 0)
@@ -16207,6 +16200,14 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
       ConsecutiveValCount = 0;
       PrevVal = V;
     }
+    if (MaybeLowHalfZeroHigh) {
+      if (i < HalfElts) {
+        if (V != LowHalfFirstVal)
+          MaybeLowHalfZeroHigh = false;
+      } else if (!IsZero(V)) {
+        MaybeLowHalfZeroHigh = false;
+      }
+    }
 
     // Keep different values and its last consecutive count. For example,
     //
diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index 398273babe1b1..21ff921da9b8a 100644
--- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -687,57 +687,14 @@ bool AArch64MIPeepholeOpt::visitINSviGPR(MachineInstr &MI, unsigned Opc) {
 }
 
 // All instructions that set a FPR64 will implicitly zero the top bits of the
-// register. When the def is expressed as a COPY from a GPR, turn it into an
-// explicit FMOV so it cannot be elided later in further passes.
+// register.
 static bool is64bitDefwithZeroHigh64bit(MachineInstr *MI,
-                                        MachineRegisterInfo *MRI,
-                                        const AArch64InstrInfo *TII) {
+                                        MachineRegisterInfo *MRI) {
   if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
     return false;
   const TargetRegisterClass *RC = MRI->getRegClass(MI->getOperand(0).getReg());
   if (RC != &AArch64::FPR64RegClass)
     return false;
-  if (MI->getOpcode() == TargetOpcode::COPY) {
-    MachineOperand &SrcOp = MI->getOperand(1);
-    if (!SrcOp.isReg())
-      return false;
-    if (SrcOp.getSubReg())
-      return false;
-    Register SrcReg = SrcOp.getReg();
-    auto IsGPR64Like = [&]() -> bool {
-      if (SrcReg.isVirtual())
-        return AArch64::GPR64allRegClass.hasSubClassEq(
-            MRI->getRegClass(SrcReg));
-      return AArch64::GPR64allRegClass.contains(SrcReg);
-    };
-    if (!IsGPR64Like())
-      return false;
-    assert(TII && "Expected InstrInfo when materializing COPYs");
-    // FMOVXDr insists on strict GPR64 operands, so fix up the COPY source.
-    MachineOperand &SrcMO = MI->getOperand(1);
-    bool SrcKill = SrcMO.isKill();
-    if (SrcReg.isVirtual()) {
-      if (MRI->getRegClass(SrcReg) != &AArch64::GPR64RegClass) {
-        // Pass the value through a temporary GPR64 vreg to satisfy the
-        // verifier.
-        Register NewSrc = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
-        BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
-                TII->get(TargetOpcode::COPY), NewSrc)
-            .addReg(SrcReg, getKillRegState(SrcKill));
-        SrcReg = NewSrc;
-        SrcKill = true;
-      }
-    } else if (!AArch64::GPR64RegClass.contains(SrcReg)) {
-      return false;
-    }
-    SrcMO.setReg(SrcReg);
-    SrcMO.setSubReg(0);
-    SrcMO.setIsKill(SrcKill);
-    // Replace the COPY with an explicit FMOV so the zeroing behaviour stays
-    // visible.
-    MI->setDesc(TII->get(AArch64::FMOVXDr));
-    return true;
-  }
   return MI->getOpcode() > TargetOpcode::GENERIC_OP_END;
 }
 
@@ -753,7 +710,7 @@ bool AArch64MIPeepholeOpt::visitINSvi64lane(MachineInstr &MI) {
   if (Low64MI->getOpcode() != AArch64::INSERT_SUBREG)
     return false;
   Low64MI = MRI->getUniqueVRegDef(Low64MI->getOperand(2).getReg());
-  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI, TII))
+  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI))
     return false;
 
   // Check there is `mov 0` MI for high 64-bits.
@@ -794,7 +751,7 @@ bool AArch64MIPeepholeOpt::visitINSvi64lane(MachineInstr &MI) {
 bool AArch64MIPeepholeOpt::visitFMOVDr(MachineInstr &MI) {
   // An FMOVDr sets the high 64-bits to zero implicitly, similar to ORR for GPR.
   MachineInstr *Low64MI = MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
-  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI, TII))
+  if (!Low64MI || !is64bitDefwithZeroHigh64bit(Low64MI, MRI))
     return false;
 
   // Let's remove MIs for high 64-bits.

>From 81e096511d12a7006aad6449417170675607dab4 Mon Sep 17 00:00:00 2001
From: Amichaxx <amina.chabane at arm.com>
Date: Thu, 19 Feb 2026 15:38:19 +0000
Subject: [PATCH 7/7] Update tests: revert peephole-insvigpr.mir and update
 existing tests

---
 llvm/test/CodeGen/AArch64/aarch64-addv.ll     |   3 +
 .../AArch64/aarch64-matrix-umull-smull.ll     |  15 +-
 ...arch64-neonvector-tensorflow-regression.ll | 285 ------------------
 llvm/test/CodeGen/AArch64/bitcast-extend.ll   |   1 +
 llvm/test/CodeGen/AArch64/ctpop.ll            |   1 +
 llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll |  48 +--
 .../AArch64/neon-lowhalf128-optimisation.ll   |   1 +
 .../CodeGen/AArch64/peephole-insvigpr.mir     |  51 ----
 8 files changed, 38 insertions(+), 367 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AArch64/aarch64-neonvector-tensorflow-regression.ll

diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
index de68a79824eb3..d8aeeff79b936 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll
@@ -545,6 +545,7 @@ define i8 @addv_zero_lanes_v16i8(ptr %arr)  {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    ldrb w8, [x0]
 ; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    fmov d0, d0
 ; CHECK-SD-NEXT:    addv b0, v0.16b
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -569,6 +570,7 @@ define i16 @addv_zero_lanes_v8i16(ptr %arr)  {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    ldrh w8, [x0]
 ; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    fmov d0, d0
 ; CHECK-SD-NEXT:    addv h0, v0.8h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -593,6 +595,7 @@ define i32 @addv_zero_lanes_v4i32(ptr %arr)  {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    ldr w8, [x0]
 ; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    fmov d0, d0
 ; CHECK-SD-NEXT:    addv s0, v0.4s
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index ff2d5c68af531..fa982ce27c7d0 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -823,13 +823,14 @@ define i64 @red_mla_dup_ext_u8_s8_s64(ptr noalias noundef readonly captures(none
 ; CHECK-SD-NEXT:    cbz x11, .LBB6_13
 ; CHECK-SD-NEXT:  .LBB6_10: // %vec.epilog.ph
 ; CHECK-SD-NEXT:    mov w11, w1
-; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-SD-NEXT:    sxtb x8, w11
 ; CHECK-SD-NEXT:    movi v3.2d, #0x000000000000ff
-; CHECK-SD-NEXT:    sxtb x11, w11
-; CHECK-SD-NEXT:    fmov d2, x8
-; CHECK-SD-NEXT:    dup v1.2s, w11
 ; CHECK-SD-NEXT:    mov x11, x10
 ; CHECK-SD-NEXT:    and x10, x9, #0xfffffffc
+; CHECK-SD-NEXT:    fmov d0, d0
+; CHECK-SD-NEXT:    dup v2.2s, w8
 ; CHECK-SD-NEXT:    sub x8, x11, x10
 ; CHECK-SD-NEXT:    add x11, x0, x11
 ; CHECK-SD-NEXT:  .LBB6_11: // %vec.epilog.vector.body
@@ -844,11 +845,11 @@ define i64 @red_mla_dup_ext_u8_s8_s64(ptr noalias noundef readonly captures(none
 ; CHECK-SD-NEXT:    and v4.16b, v4.16b, v3.16b
 ; CHECK-SD-NEXT:    xtn v5.2s, v5.2d
 ; CHECK-SD-NEXT:    xtn v4.2s, v4.2d
-; CHECK-SD-NEXT:    smlal v0.2d, v1.2s, v4.2s
-; CHECK-SD-NEXT:    smlal v2.2d, v1.2s, v5.2s
+; CHECK-SD-NEXT:    smlal v1.2d, v2.2s, v4.2s
+; CHECK-SD-NEXT:    smlal v0.2d, v2.2s, v5.2s
 ; CHECK-SD-NEXT:    b.ne .LBB6_11
 ; CHECK-SD-NEXT:  // %bb.12: // %vec.epilog.middle.block
-; CHECK-SD-NEXT:    add v0.2d, v2.2d, v0.2d
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v1.2d
 ; CHECK-SD-NEXT:    cmp x10, x9
 ; CHECK-SD-NEXT:    addp d0, v0.2d
 ; CHECK-SD-NEXT:    fmov x8, d0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-neonvector-tensorflow-regression.ll b/llvm/test/CodeGen/AArch64/aarch64-neonvector-tensorflow-regression.ll
deleted file mode 100644
index d5a12218b2402..0000000000000
--- a/llvm/test/CodeGen/AArch64/aarch64-neonvector-tensorflow-regression.ll
+++ /dev/null
@@ -1,285 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -start-before=aarch64-isel %s -o /dev/null
-; Regression test for AArch64 compile-time regression, referring to PR #166962.
-source_filename = "third_party/tensorflow/core/kernels/image/resize_bicubic_op.cc"
-target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
-target triple = "aarch64-grtev4-linux-gnu"
-
-%"struct.std::__u::array" = type { [4 x i64] }
-%"class.tensorflow::(anonymous namespace)::CachedInterpolationCalculator" = type { [4 x i64] }
-%"struct.tensorflow::InitOnStartupMarker" = type { i8 }
-
-declare void @llvm.lifetime.start.p0(ptr captures(none))
-
-declare void @llvm.lifetime.end.p0(ptr captures(none))
-
-define fastcc void @_ZN10tensorflow12_GLOBAL__N_125ComputeXWeightsAndIndicesERKNS_17ImageResizerStateEbPNSt3__u6vectorINS0_17WeightsAndIndicesENS4_9allocatorIS6_EEEE(ptr noundef nonnull readonly captures(none) %x_wais) {
-entry:
-  %new_x_indices.i116 = alloca %"struct.std::__u::array", align 8
-  %new_x_indices.i = alloca %"struct.std::__u::array", align 8
-  %calc = alloca %"class.tensorflow::(anonymous namespace)::CachedInterpolationCalculator", align 8
-  call void @llvm.lifetime.start.p0(ptr nonnull %calc)
-  call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) %calc, i8 -1, i64 32, i1 false)
-  %0 = load i64, ptr null, align 8
-  br i1 false, label %for.cond.preheader, label %for.cond4.preheader
-
-for.cond4.preheader:                              ; preds = %entry
-  %1 = icmp sgt i64 %0, 0
-  br label %for.body8
-
-for.cond.preheader:                               ; preds = %entry
-  %2 = icmp sgt i64 %0, 0
-  %sunkaddr = getelementptr i8, ptr %x_wais, i64 8
-  %x_wais.val102 = load i64, ptr %sunkaddr, align 8
-  %cmp.i = icmp ult i64 0, %x_wais.val102
-  %x_wais.val101 = load ptr, ptr %x_wais, align 8
-  %3 = load i64, ptr null, align 8
-  %4 = load float, ptr null, align 4
-  %scevgep230 = getelementptr i8, ptr %x_wais.val101, i64 0
-  tail call fastcc void null(float noundef %4, i64 noundef 0, i64 noundef %3, ptr noundef nonnull %scevgep230)
-  %sunkaddr256 = getelementptr i8, ptr %x_wais, i64 8
-  %x_wais.val100 = load i64, ptr %sunkaddr256, align 8
-  %cmp.i103 = icmp ult i64 0, %x_wais.val100
-  %x_wais.val99 = load ptr, ptr %x_wais, align 8
-  %scevgep228 = getelementptr i8, ptr %x_wais.val99, i64 0
-  %scevgep229 = getelementptr i8, ptr %scevgep228, i64 16
-  %5 = load i64, ptr %scevgep229, align 8
-  %scevgep226 = getelementptr i8, ptr %x_wais.val99, i64 0
-  %scevgep227 = getelementptr i8, ptr %scevgep226, i64 24
-  %6 = load i64, ptr %scevgep227, align 8
-  %scevgep224 = getelementptr i8, ptr %x_wais.val99, i64 0
-  %scevgep225 = getelementptr i8, ptr %scevgep224, i64 32
-  %7 = load i64, ptr %scevgep225, align 8
-  %scevgep222 = getelementptr i8, ptr %x_wais.val99, i64 0
-  %scevgep223 = getelementptr i8, ptr %scevgep222, i64 40
-  %8 = load i64, ptr %scevgep223, align 8
-  call void @llvm.lifetime.start.p0(ptr nonnull %new_x_indices.i)
-  store i64 %5, ptr %new_x_indices.i, align 8
-  %sunkaddr257 = getelementptr inbounds i8, ptr %new_x_indices.i, i64 8
-  store i64 %6, ptr %sunkaddr257, align 8
-  %sunkaddr258 = getelementptr inbounds i8, ptr %new_x_indices.i, i64 16
-  store i64 %7, ptr %sunkaddr258, align 8
-  %sunkaddr259 = getelementptr inbounds i8, ptr %new_x_indices.i, i64 24
-  store i64 %8, ptr %sunkaddr259, align 8
-  %9 = load i64, ptr %calc, align 8
-  %cmp4.not.i = icmp eq i64 %9, %5
-  %sunkaddr260 = getelementptr inbounds i8, ptr %calc, i64 8
-  %10 = load i64, ptr %sunkaddr260, align 8
-  %cmp4.152.i = icmp eq i64 %10, %5
-  store i64 %5, ptr %calc, align 8
-  br label %if.end.1.i
-
-if.end.1.i:                                       ; preds = %for.cond.preheader
-  %new_indices_hand.15361.i = phi i64 [ 1, %for.cond.preheader ]
-  %sunkaddr261 = getelementptr inbounds i8, ptr %calc, i64 16
-  %11 = load i64, ptr %sunkaddr261, align 8
-  %arrayidx.i.2.i = getelementptr inbounds nuw i64, ptr %new_x_indices.i, i64 %new_indices_hand.15361.i
-  %12 = load i64, ptr %arrayidx.i.2.i, align 8
-  %cmp4.2.i = icmp eq i64 %11, %12
-  %cmp5.2.i = icmp samesign ult i64 %new_indices_hand.15361.i, 2
-  %arrayidx12.2.i = getelementptr inbounds nuw i64, ptr %calc, i64 %new_indices_hand.15361.i
-  store i64 %11, ptr %arrayidx12.2.i, align 8
-  %inc13.2.i = add nuw nsw i64 %new_indices_hand.15361.i, 1
-  %arrayidx.i.3.i.phi.trans.insert = getelementptr inbounds nuw i64, ptr %new_x_indices.i, i64 %inc13.2.i
-  %.pre189 = load i64, ptr %arrayidx.i.3.i.phi.trans.insert, align 8
-  %sunkaddr262 = getelementptr inbounds i8, ptr %calc, i64 24
-  %13 = load i64, ptr %sunkaddr262, align 8
-  %cmp4.3.i = icmp eq i64 %13, %.pre189
-  %cmp5.3.i = icmp samesign ult i64 %inc13.2.i, 3
-  %arrayidx12.3.i = getelementptr inbounds nuw i64, ptr %calc, i64 %inc13.2.i
-  store i64 %.pre189, ptr %arrayidx12.3.i, align 8
-  %inc13.3.i = add nuw nsw i64 %inc13.2.i, 1
-  %cond = icmp eq i64 %inc13.3.i, 2
-  call void @llvm.lifetime.end.p0(ptr nonnull %new_x_indices.i)
-  %scevgep220 = getelementptr i8, ptr %x_wais.val99, i64 0
-  %scevgep221 = getelementptr i8, ptr %scevgep220, i64 48
-  %14 = trunc i64 %inc13.3.i to i32
-  store i32 %14, ptr %scevgep221, align 8
-  %inc = add nuw nsw i64 0, 1
-  %15 = load i64, ptr null, align 8
-  %lsr.iv.next219 = add nuw i64 0, 56
-  %cmp = icmp slt i64 %inc, %15
-  %cmp26184 = icmp sgt i64 %15, 0
-  br label %for.body28
-
-for.body8:                                        ; preds = %for.body8, %for.cond4.preheader
-  %lsr.iv231 = phi i64 [ 48, %for.cond4.preheader ], [ %lsr.iv.next232, %for.body8 ]
-  %x3.0181 = phi i64 [ 0, %for.cond4.preheader ], [ %inc21, %for.body8 ]
-  %16 = load i64, ptr null, align 8
-  %sunkaddr268 = getelementptr i8, ptr %x_wais, i64 8
-  %x_wais.val98 = load i64, ptr %sunkaddr268, align 8
-  %cmp.i107 = icmp ult i64 %x3.0181, %x_wais.val98
-  %x_wais.val97 = load ptr, ptr %x_wais, align 8
-  %17 = load float, ptr null, align 4
-  %scevgep252 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
-  %tmp = trunc i64 %x3.0181 to i32
-  %conv.i.i = sitofp i32 %tmp to float
-  %mul.i.i = fmul float %17, %conv.i.i
-  %18 = tail call noundef float @llvm.floor.f32(float %mul.i.i)
-  %conv2.i = fptosi float %18 to i64
-  %conv3.i = sitofp i64 %conv2.i to float
-  %sub.i = fsub float %mul.i.i, %conv3.i
-  %mul.i = fmul float %sub.i, 1.024000e+03
-  %call4.i = tail call i64 null(float noundef %mul.i)
-  %19 = load atomic i8, ptr getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 18) acquire, align 8
-  %20 = zext i8 %19 to i32
-  %21 = and i32 %20, 1
-  %guard.uninitialized1.i.i = icmp eq i32 %21, 0
-  %22 = tail call i32 null(ptr nonnull getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 18))
-  %tobool3.not.i.i = icmp eq i32 %22, 0
-  %call5.i.i = tail call fastcc noundef ptr null(double noundef -7.500000e-01)
-  store ptr %call5.i.i, ptr getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 17), align 8
-  tail call void null(ptr nonnull getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 18))
-  %retval.0.i.i = load ptr, ptr getelementptr inbounds (<{ %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", %"struct.tensorflow::InitOnStartupMarker", [2 x i8], ptr, i64, ptr, i64 }>, ptr null, i32 0, i32 17), align 8
-  %mul6.i = shl nsw i64 %call4.i, 1
-  %23 = getelementptr float, ptr %retval.0.i.i, i64 %mul6.i
-  %arrayidx.i111 = getelementptr i8, ptr %23, i64 4
-  %24 = load float, ptr %arrayidx.i111, align 4
-  %sunkaddr270 = getelementptr i8, ptr %scevgep252, i64 -48
-  store float %24, ptr %sunkaddr270, align 8
-  %25 = load float, ptr %23, align 4
-  %scevgep250 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
-  %scevgep251 = getelementptr i8, ptr %scevgep250, i64 -44
-  store float %25, ptr %scevgep251, align 4
-  %mul10.i = sub i64 2048, %mul6.i
-  %arrayidx11.i = getelementptr inbounds float, ptr %retval.0.i.i, i64 %mul10.i
-  %26 = load float, ptr %arrayidx11.i, align 4
-  %scevgep248 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
-  %scevgep249 = getelementptr i8, ptr %scevgep248, i64 -40
-  store float %26, ptr %scevgep249, align 8
-  %add14.i = sub i64 2049, %mul6.i
-  %arrayidx15.i = getelementptr inbounds float, ptr %retval.0.i.i, i64 %add14.i
-  %27 = load float, ptr %arrayidx15.i, align 4
-  %scevgep246 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
-  %scevgep247 = getelementptr i8, ptr %scevgep246, i64 -36
-  store float %27, ptr %scevgep247, align 4
-  %sub.i.i = add nsw i64 %16, -1
-  %28 = insertelement <2 x i64> poison, i64 %conv2.i, i64 0
-  %29 = shufflevector <2 x i64> %28, <2 x i64> poison, <2 x i32> zeroinitializer
-  %30 = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %29, <2 x i64> <i64 1, i64 0>)
-  %scevgep244 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
-  %scevgep245 = getelementptr i8, ptr %scevgep244, i64 -32
-  %31 = add nsw <2 x i64> %30, <i64 -1, i64 0>
-  %32 = insertelement <2 x i64> poison, i64 %sub.i.i, i64 0
-  %33 = shufflevector <2 x i64> %32, <2 x i64> poison, <2 x i32> zeroinitializer
-  %34 = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %31, <2 x i64> %33)
-  store <2 x i64> %34, ptr %scevgep245, align 8
-  %35 = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %29, <2 x i64> <i64 -1, i64 -2>)
-  %scevgep242 = getelementptr i8, ptr %x_wais.val97, i64 %lsr.iv231
-  %scevgep243 = getelementptr i8, ptr %scevgep242, i64 -16
-  %36 = add nsw <2 x i64> %35, <i64 1, i64 2>
-  %37 = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %36, <2 x i64> %33)
-  store <2 x i64> %37, ptr %scevgep243, align 8
-  %sunkaddr271 = getelementptr i8, ptr %x_wais, i64 8
-  %x_wais.val96 = load i64, ptr %sunkaddr271, align 8
-  %cmp.i112 = icmp ult i64 %x3.0181, %x_wais.val96
-  %x_wais.val95 = load ptr, ptr %x_wais, align 8
-  %scevgep240 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
-  %scevgep241 = getelementptr i8, ptr %scevgep240, i64 -32
-  %38 = load i64, ptr %scevgep241, align 8
-  %scevgep238 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
-  %scevgep239 = getelementptr i8, ptr %scevgep238, i64 -24
-  %39 = load i64, ptr %scevgep239, align 8
-  %scevgep236 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
-  %scevgep237 = getelementptr i8, ptr %scevgep236, i64 -16
-  %40 = load i64, ptr %scevgep237, align 8
-  %scevgep234 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
-  %scevgep235 = getelementptr i8, ptr %scevgep234, i64 -8
-  %41 = load i64, ptr %scevgep235, align 8
-  call void @llvm.lifetime.start.p0(ptr nonnull %new_x_indices.i116)
-  store i64 %38, ptr %new_x_indices.i116, align 8
-  %sunkaddr272 = getelementptr inbounds i8, ptr %new_x_indices.i116, i64 8
-  store i64 %39, ptr %sunkaddr272, align 8
-  %sunkaddr273 = getelementptr inbounds i8, ptr %new_x_indices.i116, i64 16
-  store i64 %40, ptr %sunkaddr273, align 8
-  %sunkaddr274 = getelementptr inbounds i8, ptr %new_x_indices.i116, i64 24
-  store i64 %41, ptr %sunkaddr274, align 8
-  %42 = load i64, ptr %calc, align 8
-  %cmp4.not.i120 = icmp eq i64 %42, %38
-  %sunkaddr275 = getelementptr inbounds i8, ptr %calc, i64 8
-  %43 = load i64, ptr %sunkaddr275, align 8
-  %cmp4.1.i161 = icmp eq i64 %43, %39
-  %sunkaddr276 = getelementptr inbounds i8, ptr %calc, i64 16
-  %44 = load i64, ptr %sunkaddr276, align 8
-  %arrayidx.i.2.i128 = getelementptr inbounds nuw i64, ptr %new_x_indices.i116, i64 2
-  %45 = load i64, ptr %arrayidx.i.2.i128, align 8
-  %cmp4.2.i129 = icmp eq i64 %44, %45
-  %cmp5.2.i149 = icmp samesign ult i64 2, 2
-  %arrayidx12.2.i154 = getelementptr inbounds nuw i64, ptr %calc, i64 2
-  store i64 %44, ptr %arrayidx12.2.i154, align 8
-  %inc13.2.i151 = add nuw nsw i64 2, 1
-  %arrayidx.i.3.i134.phi.trans.insert = getelementptr inbounds nuw i64, ptr %new_x_indices.i116, i64 %inc13.2.i151
-  %.pre = load i64, ptr %arrayidx.i.3.i134.phi.trans.insert, align 8
-  %sunkaddr277 = getelementptr inbounds i8, ptr %calc, i64 24
-  %46 = load i64, ptr %sunkaddr277, align 8
-  %cmp4.3.i135 = icmp eq i64 %46, %.pre
-  %cmp5.3.i143 = icmp samesign ult i64 %inc13.2.i151, 3
-  %arrayidx12.3.i147 = getelementptr inbounds nuw i64, ptr %calc, i64 %inc13.2.i151
-  store i64 %.pre, ptr %arrayidx12.3.i147, align 8
-  %inc13.3.i145 = add nuw nsw i64 %inc13.2.i151, 1
-  call void @llvm.lifetime.end.p0(ptr nonnull %new_x_indices.i116)
-  %scevgep233 = getelementptr i8, ptr %x_wais.val95, i64 %lsr.iv231
-  %47 = trunc i64 %inc13.3.i145 to i32
-  store i32 %47, ptr %scevgep233, align 8
-  %inc21 = add nuw nsw i64 %x3.0181, 1
-  %48 = load i64, ptr null, align 8
-  %lsr.iv.next232 = add i64 %lsr.iv231, 56
-  %cmp6 = icmp slt i64 %inc21, %48
-  br label %for.body8
-
-for.body28:                                       ; preds = %for.body28, %if.end.1.i
-  %lsr.iv = phi i64 [ 0, %if.end.1.i ], [ %lsr.iv.next, %for.body28 ]
-  %indvars.iv = phi i64 [ 0, %if.end.1.i ], [ %indvars.iv.next, %for.body28 ]
-  %sunkaddr282 = getelementptr i8, ptr %x_wais, i64 8
-  %x_wais.val94 = load i64, ptr %sunkaddr282, align 8
-  %cmp.i163 = icmp ugt i64 %x_wais.val94, %indvars.iv
-  %x_wais.val93 = load ptr, ptr %x_wais, align 8
-  %49 = load i64, ptr null, align 8
-  %scevgep216 = getelementptr i8, ptr %x_wais.val93, i64 %lsr.iv
-  %scevgep217 = getelementptr i8, ptr %scevgep216, i64 16
-  %50 = load i64, ptr %scevgep217, align 8
-  %mul = mul nsw i64 %50, %49
-  store i64 %mul, ptr %scevgep217, align 8
-  %sunkaddr284 = getelementptr i8, ptr %x_wais, i64 8
-  %x_wais.val92 = load i64, ptr %sunkaddr284, align 8
-  %cmp.i167 = icmp ugt i64 %x_wais.val92, %indvars.iv
-  %x_wais.val91 = load ptr, ptr %x_wais, align 8
-  %51 = load i64, ptr null, align 8
-  %scevgep214 = getelementptr i8, ptr %x_wais.val91, i64 %lsr.iv
-  %scevgep215 = getelementptr i8, ptr %scevgep214, i64 24
-  %52 = load i64, ptr %scevgep215, align 8
-  %mul36 = mul nsw i64 %52, %51
-  store i64 %mul36, ptr %scevgep215, align 8
-  %sunkaddr286 = getelementptr i8, ptr %x_wais, i64 8
-  %x_wais.val90 = load i64, ptr %sunkaddr286, align 8
-  %cmp.i171 = icmp ugt i64 %x_wais.val90, %indvars.iv
-  %x_wais.val89 = load ptr, ptr %x_wais, align 8
-  %53 = load i64, ptr null, align 8
-  %scevgep212 = getelementptr i8, ptr %x_wais.val89, i64 %lsr.iv
-  %scevgep213 = getelementptr i8, ptr %scevgep212, i64 32
-  %54 = load i64, ptr %scevgep213, align 8
-  %mul41 = mul nsw i64 %54, %53
-  store i64 %mul41, ptr %scevgep213, align 8
-  %sunkaddr288 = getelementptr i8, ptr %x_wais, i64 8
-  %x_wais.val88 = load i64, ptr %sunkaddr288, align 8
-  %cmp.i175 = icmp ugt i64 %x_wais.val88, %indvars.iv
-  %x_wais.val = load ptr, ptr %x_wais, align 8
-  %55 = load i64, ptr null, align 8
-  %scevgep = getelementptr i8, ptr %x_wais.val, i64 %lsr.iv
-  %scevgep211 = getelementptr i8, ptr %scevgep, i64 40
-  %56 = load i64, ptr %scevgep211, align 8
-  %mul46 = mul nsw i64 %56, %55
-  store i64 %mul46, ptr %scevgep211, align 8
-  %indvars.iv.next = add nuw i64 %indvars.iv, 1
-  %57 = load i64, ptr null, align 8
-  %lsr.iv.next = add nuw i64 %lsr.iv, 56
-  %cmp26 = icmp sgt i64 %57, %indvars.iv.next
-  br label %for.body28
-}
-
-declare void @llvm.memset.p0.i64(ptr writeonly captures(none), i8, i64, i1 immarg)
-
-declare float @llvm.floor.f32(float)
-
-declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
-
-declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
diff --git a/llvm/test/CodeGen/AArch64/bitcast-extend.ll b/llvm/test/CodeGen/AArch64/bitcast-extend.ll
index b981c1701725a..2bd91a8dc9a7d 100644
--- a/llvm/test/CodeGen/AArch64/bitcast-extend.ll
+++ b/llvm/test/CodeGen/AArch64/bitcast-extend.ll
@@ -341,6 +341,7 @@ define <16 x i8> @load_zext_v16i8(ptr %p) {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    ldr w8, [x0]
 ; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    fmov d0, d0
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: load_zext_v16i8:
diff --git a/llvm/test/CodeGen/AArch64/ctpop.ll b/llvm/test/CodeGen/AArch64/ctpop.ll
index 84984c23f129e..df817afb12368 100644
--- a/llvm/test/CodeGen/AArch64/ctpop.ll
+++ b/llvm/test/CodeGen/AArch64/ctpop.ll
@@ -602,6 +602,7 @@ define i128 @i128_mask(i128 %x) {
 ; CHECK-SD-NEXT:    and x8, x0, #0xff
 ; CHECK-SD-NEXT:    mov x1, xzr
 ; CHECK-SD-NEXT:    fmov d0, x8
+; CHECK-SD-NEXT:    fmov d0, d0
 ; CHECK-SD-NEXT:    cnt v0.16b, v0.16b
 ; CHECK-SD-NEXT:    addv b0, v0.16b
 ; CHECK-SD-NEXT:    fmov x0, d0
diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
index 137a7feb1a85c..fd7c869fe2f92 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
@@ -829,9 +829,9 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
+; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-CVT-SD-NEXT:    fmov d1, x8
+; CHECK-CVT-SD-NEXT:    fmov d1, x9
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -858,9 +858,9 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
+; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-FP16-SD-NEXT:    fmov d1, x8
+; CHECK-FP16-SD-NEXT:    fmov d1, x9
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -1296,9 +1296,9 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
+; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-CVT-SD-NEXT:    fmov d1, x8
+; CHECK-CVT-SD-NEXT:    fmov d1, x9
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -1326,9 +1326,9 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
+; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-FP16-SD-NEXT:    fmov d1, x8
+; CHECK-FP16-SD-NEXT:    fmov d1, x9
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -1748,9 +1748,9 @@ define <2 x i64> @utest_f16i64(<2 x half> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
+; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-CVT-SD-NEXT:    fmov d1, x8
+; CHECK-CVT-SD-NEXT:    fmov d1, x9
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -1778,9 +1778,9 @@ define <2 x i64> @utest_f16i64(<2 x half> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
+; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-FP16-SD-NEXT:    fmov d1, x8
+; CHECK-FP16-SD-NEXT:    fmov d1, x9
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -2774,9 +2774,9 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
+; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-CVT-SD-NEXT:    fmov d1, x8
+; CHECK-CVT-SD-NEXT:    fmov d1, x9
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -2803,9 +2803,9 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
+; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-FP16-SD-NEXT:    fmov d1, x8
+; CHECK-FP16-SD-NEXT:    fmov d1, x9
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -3232,9 +3232,9 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
+; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-CVT-SD-NEXT:    fmov d1, x8
+; CHECK-CVT-SD-NEXT:    fmov d1, x9
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -3262,9 +3262,9 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
+; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-FP16-SD-NEXT:    fmov d1, x8
+; CHECK-FP16-SD-NEXT:    fmov d1, x9
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
@@ -3675,9 +3675,9 @@ define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
 ; CHECK-CVT-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-CVT-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-CVT-SD-NEXT:    cmp x20, #0
+; CHECK-CVT-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-CVT-SD-NEXT:    fmov d0, x8
-; CHECK-CVT-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-CVT-SD-NEXT:    fmov d1, x8
+; CHECK-CVT-SD-NEXT:    fmov d1, x9
 ; CHECK-CVT-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CVT-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-CVT-SD-NEXT:    add sp, sp, #48
@@ -3705,9 +3705,9 @@ define <2 x i64> @utest_f16i64_mm(<2 x half> %x) {
 ; CHECK-FP16-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Reload
 ; CHECK-FP16-SD-NEXT:    csel x8, x0, xzr, eq
 ; CHECK-FP16-SD-NEXT:    cmp x20, #0
+; CHECK-FP16-SD-NEXT:    csel x9, x19, xzr, eq
 ; CHECK-FP16-SD-NEXT:    fmov d0, x8
-; CHECK-FP16-SD-NEXT:    csel x8, x19, xzr, eq
-; CHECK-FP16-SD-NEXT:    fmov d1, x8
+; CHECK-FP16-SD-NEXT:    fmov d1, x9
 ; CHECK-FP16-SD-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-FP16-SD-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-FP16-SD-NEXT:    add sp, sp, #48
diff --git a/llvm/test/CodeGen/AArch64/neon-lowhalf128-optimisation.ll b/llvm/test/CodeGen/AArch64/neon-lowhalf128-optimisation.ll
index 38be2992c8211..4e30813187fec 100644
--- a/llvm/test/CodeGen/AArch64/neon-lowhalf128-optimisation.ll
+++ b/llvm/test/CodeGen/AArch64/neon-lowhalf128-optimisation.ll
@@ -5,6 +5,7 @@ define <2 x i64> @low_vector_splat_v2i64_from_i64(i64 %0){
 ; CHECK-LABEL: low_vector_splat_v2i64_from_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    fmov d0, d0
 ; CHECK-NEXT:    ret
   %2 = insertelement <1 x i64> poison, i64 %0, i64 0
   %3 = shufflevector <1 x i64> %2, <1 x i64> zeroinitializer, <2 x i32> <i32 0, i32 1>
diff --git a/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir b/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
index a68eda11d5ca1..aef01e42ed7cc 100644
--- a/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
+++ b/llvm/test/CodeGen/AArch64/peephole-insvigpr.mir
@@ -41,11 +41,6 @@
     ret void
   }
 
-  define void @insert_vec_from_gpr64_zero_high(i64 %v, ptr %dst) {
-  entry:
-    ret void
-  }
-
   attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
 
 ...
@@ -526,50 +521,4 @@ body:             |
     STRSui killed %16, %0, 0 :: (store (s32) into %ir.hist)
     RET_ReallyLR
 
----
-name:            insert_vec_from_gpr64_zero_high
-tracksRegLiveness: true
-registers:
-  - { id: 0, class: gpr64common, preferred-register: '' }
-  - { id: 1, class: gpr64common, preferred-register: '' }
-  - { id: 2, class: fpr64, preferred-register: '' }
-  - { id: 3, class: fpr128, preferred-register: '' }
-  - { id: 4, class: fpr128, preferred-register: '' }
-  - { id: 5, class: fpr64, preferred-register: '' }
-  - { id: 6, class: fpr128, preferred-register: '' }
-  - { id: 7, class: fpr128, preferred-register: '' }
-  - { id: 8, class: fpr128, preferred-register: '' }
-liveins:
-  - { reg: '$x0', virtual-reg: '%0' }
-  - { reg: '$x1', virtual-reg: '%1' }
-body:             |
-  bb.0.entry:
-    liveins: $x0, $x1
-
-    ; CHECK-LABEL: name: insert_vec_from_gpr64_zero_high
-    ; CHECK: liveins: $x0, $x1
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[PTR:%[0-9]+]]:gpr64common = COPY $x0
-    ; CHECK-NEXT: [[VAL:%[0-9]+]]:gpr64common = COPY $x1
-    ; CHECK-NEXT: [[GPR:%[0-9]+]]:gpr64 = COPY [[VAL]]
-    ; CHECK-NEXT: [[FMOV:%[0-9]+]]:fpr64 = FMOVXDr killed [[GPR]]
-    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_LOW:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[FMOV]], %subreg.dsub
-    ; CHECK-NEXT: [[MOVID:%[0-9]+]]:fpr64 = MOVID 0
-    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_ZERO:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], killed [[MOVID]], %subreg.dsub
-    ; CHECK-NEXT: STRQui killed [[INSERT_LOW]], [[PTR]], 0 :: (store (s128) into %ir.dst, align 8)
-    ; CHECK-NEXT: RET_ReallyLR
-    %0:gpr64common = COPY $x0
-    %1:gpr64common = COPY $x1
-    %2:fpr64 = COPY %1
-    %4:fpr128 = IMPLICIT_DEF
-    %3:fpr128 = INSERT_SUBREG %4, %2, %subreg.dsub
-    %5:fpr64 = MOVID 0
-    %7:fpr128 = IMPLICIT_DEF
-    %6:fpr128 = INSERT_SUBREG %7, killed %5, %subreg.dsub
-    %8:fpr128 = INSvi64lane %3, 1, killed %6, 0
-    STRQui killed %8, %0, 0 :: (store (s128) into %ir.dst, align 8)
-    RET_ReallyLR
-
 ...



More information about the llvm-commits mailing list