[llvm] [AArch64] Lower bfloat FADD/SUB to BFMLAL top/bottom instructions (PR #174814)

Benjamin Maxwell via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 9 08:34:10 PST 2026


https://github.com/MacDue updated https://github.com/llvm/llvm-project/pull/174814

>From 886e5d9e22466dd26d4dc80b85a28b860021fe3b Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Fri, 9 Jan 2026 15:55:07 +0000
Subject: [PATCH] [AArch64] Lower bfloat FADD/SUB to BFMLAL top/bottom
 instructions

Assuming all constant/invariant operations are hoisted, this lowering
should improve throughput for Neon and SVE.

Neon: https://godbolt.org/z/eM7qEfExv
SVE: https://godbolt.org/z/8jv19eGf3

This lowering works by extending the even/odd lanes of the LHS to be
used as the accumulators of the BFMLAL instructions. The RHS is then
used as one of the multiplied operands of the BFMLAL, with the other
being a constant 1.0 (or -1.0 for FSUB).
---
 .../Target/AArch64/AArch64ISelLowering.cpp    |  79 ++++--
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |   3 +-
 .../CodeGen/AArch64/bf16-v8-instructions.ll   |  73 +++--
 .../AArch64/fixed-length-bf16-arith.ll        |  37 +--
 llvm/test/CodeGen/AArch64/sve-bf16-arith.ll   |  92 ++++---
 .../test/CodeGen/AArch64/sve-bf16-combines.ll | 251 +++++++-----------
 6 files changed, 283 insertions(+), 252 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e9396ae76776b..362140e6d74ae 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1826,22 +1826,28 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
         !Subtarget->isNonStreamingSVEorSME2Available()) {
       for (MVT VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
         MVT PromotedVT = VT.changeVectorElementType(MVT::f32);
-        setOperationPromotedToType(ISD::FADD, VT, PromotedVT);
         setOperationPromotedToType(ISD::FMA, VT, PromotedVT);
         setOperationPromotedToType(ISD::FMAXIMUM, VT, PromotedVT);
         setOperationPromotedToType(ISD::FMAXNUM, VT, PromotedVT);
         setOperationPromotedToType(ISD::FMINIMUM, VT, PromotedVT);
         setOperationPromotedToType(ISD::FMINNUM, VT, PromotedVT);
-        setOperationPromotedToType(ISD::FSUB, VT, PromotedVT);
 
-        if (VT != MVT::nxv2bf16 && Subtarget->hasBF16())
+        if (VT != MVT::nxv2bf16 && Subtarget->hasBF16()) {
           setOperationAction(ISD::FMUL, VT, Custom);
-        else
+          setOperationAction(ISD::FADD, VT, Custom);
+          setOperationAction(ISD::FSUB, VT, Custom);
+        } else {
           setOperationPromotedToType(ISD::FMUL, VT, PromotedVT);
+          setOperationPromotedToType(ISD::FADD, VT, PromotedVT);
+          setOperationPromotedToType(ISD::FSUB, VT, PromotedVT);
+        }
       }
 
-      if (Subtarget->hasBF16() && Subtarget->isNeonAvailable())
+      if (Subtarget->hasBF16() && Subtarget->isNeonAvailable()) {
         setOperationAction(ISD::FMUL, MVT::v8bf16, Custom);
+        setOperationAction(ISD::FADD, MVT::v8bf16, Custom);
+        setOperationAction(ISD::FSUB, MVT::v8bf16, Custom);
+      }
     }
 
     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
@@ -7713,17 +7719,21 @@ SDValue AArch64TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                      EndOfTrmp);
 }
 
-SDValue AArch64TargetLowering::LowerFMUL(SDValue Op, SelectionDAG &DAG) const {
+SDValue
+AArch64TargetLowering::LowerBFloatArithToBFMLAL(SDValue Op,
+                                                SelectionDAG &DAG) const {
   SDLoc DL(Op);
+  unsigned Opcode = Op.getOpcode();
   EVT VT = Op.getValueType();
-  if (VT.getScalarType() != MVT::bf16 ||
+  if ((Opcode != ISD::FADD && Opcode != ISD::FSUB && Opcode != ISD::FMUL) ||
+      VT.getScalarType() != MVT::bf16 ||
       (Subtarget->hasSVEB16B16() &&
        Subtarget->isNonStreamingSVEorSME2Available()))
-    return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMUL_PRED);
+    return SDValue();
 
-  assert(Subtarget->hasBF16() && "Expected +bf16 for custom FMUL lowering");
+  assert(Subtarget->hasBF16() && "Expected +bf16 for custom FMUL/ADD lowering");
   assert((VT == MVT::nxv4bf16 || VT == MVT::nxv8bf16 || VT == MVT::v8bf16) &&
-         "Unexpected FMUL VT");
+         "Unexpected FMUL/ADD VT");
 
   auto MakeGetIntrinsic = [&](Intrinsic::ID IID) {
     return [&, IID](EVT VT, auto... Ops) {
@@ -7757,14 +7767,41 @@ SDValue AArch64TargetLowering::LowerFMUL(SDValue Op, SelectionDAG &DAG) const {
                                     : Intrinsic::aarch64_neon_bfmlalt);
 
   EVT AccVT = UseSVEBFMLAL ? MVT::nxv4f32 : MVT::v4f32;
-  SDValue Zero = DAG.getNeutralElement(ISD::FADD, DL, AccVT, Op->getFlags());
   SDValue Pg = getPredicateForVector(DAG, DL, AccVT);
 
-  // Lower bf16 FMUL as a pair (VT == [nx]v8bf16) of BFMLAL top/bottom
-  // instructions. These result in two f32 vectors, which can be converted back
-  // to bf16 with FCVT and FCVTNT.
-  SDValue LHS = Op.getOperand(0);
-  SDValue RHS = Op.getOperand(1);
+  SDValue BottomAcc, TopAcc, LHS, RHS;
+
+  if (Opcode == ISD::FMUL) {
+    // Set both accumulators to zero.
+    BottomAcc = TopAcc =
+        DAG.getNeutralElement(ISD::FADD, DL, AccVT, Op->getFlags());
+
+    // Lower bf16 FMUL as a pair (VT == [nx]v8bf16) of BFMLAL top/bottom
+    // instructions. These result in two f32 vectors, which can be converted
+    // back to bf16 with FCVT and FCVTNT.
+    LHS = Op.getOperand(0);
+    RHS = Op.getOperand(1);
+  } else if (Opcode == ISD::FADD || Opcode == ISD::FSUB) {
+    // Lower FADD/SUB by extending the LHS to be used as the accumulator, and
+    // multiplying the RHS by 1.0F (or -1.0F for FSUB).
+    if (VT.getVectorMinNumElements() > 4) {
+      SDValue Zero = DAG.getConstantFP(+0.0, DL, VT);
+      // Note: This extends the even/odd lanes to f32. TRN1/2 is used as the
+      // zero is likely cheap/hoisted and can be used to extend the odd lanes.
+      BottomAcc = DAG.getNode(
+          AArch64ISD::NVCAST, DL, AccVT,
+          DAG.getNode(AArch64ISD::TRN1, DL, VT, Zero, Op.getOperand(0)));
+      TopAcc = DAG.getNode(
+          AArch64ISD::NVCAST, DL, AccVT,
+          DAG.getNode(AArch64ISD::TRN2, DL, VT, Zero, Op.getOperand(0)));
+    } else {
+      BottomAcc = DAG.getNode(ISD::FP_EXTEND, DL, AccVT, Op.getOperand(0));
+    }
+    LHS = Op.getOperand(1);
+    RHS = DAG.getConstantFP(Opcode == ISD::FSUB ? -1.0F : 1.0F, DL, VT);
+  } else {
+    llvm_unreachable("Unexpected operation");
+  }
 
   // All SVE intrinsics expect to operate on full bf16 vector types.
   if (UseSVEBFMLAL) {
@@ -7772,14 +7809,15 @@ SDValue AArch64TargetLowering::LowerFMUL(SDValue Op, SelectionDAG &DAG) const {
     RHS = Reinterpret(RHS, MVT::nxv8bf16);
   }
 
-  SDValue BottomF32 = Reinterpret(BFMLALB(AccVT, Zero, LHS, RHS), MVT::nxv4f32);
+  SDValue BottomF32 =
+      Reinterpret(BFMLALB(AccVT, BottomAcc, LHS, RHS), MVT::nxv4f32);
   SDValue BottomBF16 =
       FCVT(MVT::nxv8bf16, DAG.getPOISON(MVT::nxv8bf16), Pg, BottomF32);
   // Note: nxv4bf16 only uses even lanes.
   if (VT == MVT::nxv4bf16)
     return Reinterpret(BottomBF16, VT);
 
-  SDValue TopF32 = Reinterpret(BFMLALT(AccVT, Zero, LHS, RHS), MVT::nxv4f32);
+  SDValue TopF32 = Reinterpret(BFMLALT(AccVT, TopAcc, LHS, RHS), MVT::nxv4f32);
   SDValue TopBF16 = FCVTNT(MVT::nxv8bf16, BottomBF16, Pg, TopF32);
   return Reinterpret(TopBF16, VT);
 }
@@ -7829,6 +7867,9 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   LLVM_DEBUG(dbgs() << "Custom lowering: ");
   LLVM_DEBUG(Op.dump());
 
+  if (SDValue Result = LowerBFloatArithToBFMLAL(Op, DAG))
+    return Result;
+
   switch (Op.getOpcode()) {
   default:
     llvm_unreachable("unimplemented operand");
@@ -7898,7 +7939,7 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::FSUB:
     return LowerToPredicatedOp(Op, DAG, AArch64ISD::FSUB_PRED);
   case ISD::FMUL:
-    return LowerFMUL(Op, DAG);
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::FMUL_PRED);
   case ISD::FMA:
     return LowerFMA(Op, DAG);
   case ISD::FDIV:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index a066c63304b16..a63a2c6d80197 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -614,7 +614,6 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerStore128(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFMUL(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFMA(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue LowerMGATHER(SDValue Op, SelectionDAG &DAG) const;
@@ -628,6 +627,8 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
 
+  SDValue LowerBFloatArithToBFMLAL(SDValue Op, SelectionDAG &DAG) const;
+
   bool
   isEligibleForTailCallOptimization(const CallLoweringInfo &CLI) const;
 
diff --git a/llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll b/llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll
index e3c0d97c08f54..7176c6076fa8d 100644
--- a/llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll
@@ -30,17 +30,31 @@ define <8 x bfloat> @add_h(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; CHECK-CVT-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
 ; CHECK-CVT-NEXT:    ret
 ;
-; CHECK-BF16-LABEL: add_h:
-; CHECK-BF16:       // %bb.0: // %entry
-; CHECK-BF16-NEXT:    shll v2.4s, v1.4h, #16
-; CHECK-BF16-NEXT:    shll v3.4s, v0.4h, #16
-; CHECK-BF16-NEXT:    shll2 v1.4s, v1.8h, #16
-; CHECK-BF16-NEXT:    shll2 v0.4s, v0.8h, #16
-; CHECK-BF16-NEXT:    fadd v2.4s, v3.4s, v2.4s
-; CHECK-BF16-NEXT:    fadd v1.4s, v0.4s, v1.4s
-; CHECK-BF16-NEXT:    bfcvtn v0.4h, v2.4s
-; CHECK-BF16-NEXT:    bfcvtn2 v0.8h, v1.4s
-; CHECK-BF16-NEXT:    ret
+; CHECK-NOSVE-BF16-LABEL: add_h:
+; CHECK-NOSVE-BF16:       // %bb.0: // %entry
+; CHECK-NOSVE-BF16-NEXT:    shll v2.4s, v1.4h, #16
+; CHECK-NOSVE-BF16-NEXT:    shll v3.4s, v0.4h, #16
+; CHECK-NOSVE-BF16-NEXT:    shll2 v1.4s, v1.8h, #16
+; CHECK-NOSVE-BF16-NEXT:    shll2 v0.4s, v0.8h, #16
+; CHECK-NOSVE-BF16-NEXT:    fadd v2.4s, v3.4s, v2.4s
+; CHECK-NOSVE-BF16-NEXT:    fadd v1.4s, v0.4s, v1.4s
+; CHECK-NOSVE-BF16-NEXT:    bfcvtn v0.4h, v2.4s
+; CHECK-NOSVE-BF16-NEXT:    bfcvtn2 v0.8h, v1.4s
+; CHECK-NOSVE-BF16-NEXT:    ret
+;
+; CHECK-SVE-BF16-LABEL: add_h:
+; CHECK-SVE-BF16:       // %bb.0: // %entry
+; CHECK-SVE-BF16-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-SVE-BF16-NEXT:    mov z4.h, #16256 // =0x3f80
+; CHECK-SVE-BF16-NEXT:    ptrue p0.s, vl4
+; CHECK-SVE-BF16-NEXT:    trn1 v3.8h, v2.8h, v0.8h
+; CHECK-SVE-BF16-NEXT:    trn2 v2.8h, v2.8h, v0.8h
+; CHECK-SVE-BF16-NEXT:    bfmlalb v3.4s, v1.8h, v4.8h
+; CHECK-SVE-BF16-NEXT:    bfmlalt v2.4s, v1.8h, v4.8h
+; CHECK-SVE-BF16-NEXT:    bfcvt z0.h, p0/m, z3.s
+; CHECK-SVE-BF16-NEXT:    bfcvtnt z0.h, p0/m, z2.s
+; CHECK-SVE-BF16-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-BF16-NEXT:    ret
 entry:
   %0 = fadd <8 x bfloat> %a, %b
   ret <8 x bfloat> %0
@@ -74,17 +88,32 @@ define <8 x bfloat> @sub_h(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; CHECK-CVT-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
 ; CHECK-CVT-NEXT:    ret
 ;
-; CHECK-BF16-LABEL: sub_h:
-; CHECK-BF16:       // %bb.0: // %entry
-; CHECK-BF16-NEXT:    shll v2.4s, v1.4h, #16
-; CHECK-BF16-NEXT:    shll v3.4s, v0.4h, #16
-; CHECK-BF16-NEXT:    shll2 v1.4s, v1.8h, #16
-; CHECK-BF16-NEXT:    shll2 v0.4s, v0.8h, #16
-; CHECK-BF16-NEXT:    fsub v2.4s, v3.4s, v2.4s
-; CHECK-BF16-NEXT:    fsub v1.4s, v0.4s, v1.4s
-; CHECK-BF16-NEXT:    bfcvtn v0.4h, v2.4s
-; CHECK-BF16-NEXT:    bfcvtn2 v0.8h, v1.4s
-; CHECK-BF16-NEXT:    ret
+; CHECK-NOSVE-BF16-LABEL: sub_h:
+; CHECK-NOSVE-BF16:       // %bb.0: // %entry
+; CHECK-NOSVE-BF16-NEXT:    shll v2.4s, v1.4h, #16
+; CHECK-NOSVE-BF16-NEXT:    shll v3.4s, v0.4h, #16
+; CHECK-NOSVE-BF16-NEXT:    shll2 v1.4s, v1.8h, #16
+; CHECK-NOSVE-BF16-NEXT:    shll2 v0.4s, v0.8h, #16
+; CHECK-NOSVE-BF16-NEXT:    fsub v2.4s, v3.4s, v2.4s
+; CHECK-NOSVE-BF16-NEXT:    fsub v1.4s, v0.4s, v1.4s
+; CHECK-NOSVE-BF16-NEXT:    bfcvtn v0.4h, v2.4s
+; CHECK-NOSVE-BF16-NEXT:    bfcvtn2 v0.8h, v1.4s
+; CHECK-NOSVE-BF16-NEXT:    ret
+;
+; CHECK-SVE-BF16-LABEL: sub_h:
+; CHECK-SVE-BF16:       // %bb.0: // %entry
+; CHECK-SVE-BF16-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-SVE-BF16-NEXT:    mov w8, #49024 // =0xbf80
+; CHECK-SVE-BF16-NEXT:    ptrue p0.s, vl4
+; CHECK-SVE-BF16-NEXT:    dup v4.8h, w8
+; CHECK-SVE-BF16-NEXT:    trn1 v3.8h, v2.8h, v0.8h
+; CHECK-SVE-BF16-NEXT:    trn2 v2.8h, v2.8h, v0.8h
+; CHECK-SVE-BF16-NEXT:    bfmlalb v3.4s, v1.8h, v4.8h
+; CHECK-SVE-BF16-NEXT:    bfmlalt v2.4s, v1.8h, v4.8h
+; CHECK-SVE-BF16-NEXT:    bfcvt z0.h, p0/m, z3.s
+; CHECK-SVE-BF16-NEXT:    bfcvtnt z0.h, p0/m, z2.s
+; CHECK-SVE-BF16-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-SVE-BF16-NEXT:    ret
 entry:
   %0 = fsub <8 x bfloat> %a, %b
   ret <8 x bfloat> %0
diff --git a/llvm/test/CodeGen/AArch64/fixed-length-bf16-arith.ll b/llvm/test/CodeGen/AArch64/fixed-length-bf16-arith.ll
index 45f8b2fa95a83..87c98f1ad9cb1 100644
--- a/llvm/test/CodeGen/AArch64/fixed-length-bf16-arith.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-length-bf16-arith.ll
@@ -54,14 +54,16 @@ define <4 x bfloat> @fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 define <8 x bfloat> @fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; NOB16B16-LABEL: fadd_v8bf16:
 ; NOB16B16:       // %bb.0:
-; NOB16B16-NEXT:    shll v2.4s, v1.4h, #16
-; NOB16B16-NEXT:    shll v3.4s, v0.4h, #16
-; NOB16B16-NEXT:    shll2 v1.4s, v1.8h, #16
-; NOB16B16-NEXT:    shll2 v0.4s, v0.8h, #16
-; NOB16B16-NEXT:    fadd v2.4s, v3.4s, v2.4s
-; NOB16B16-NEXT:    fadd v1.4s, v0.4s, v1.4s
-; NOB16B16-NEXT:    bfcvtn v0.4h, v2.4s
-; NOB16B16-NEXT:    bfcvtn2 v0.8h, v1.4s
+; NOB16B16-NEXT:    movi v2.2d, #0000000000000000
+; NOB16B16-NEXT:    mov z4.h, #16256 // =0x3f80
+; NOB16B16-NEXT:    ptrue p0.s, vl4
+; NOB16B16-NEXT:    trn1 v3.8h, v2.8h, v0.8h
+; NOB16B16-NEXT:    trn2 v2.8h, v2.8h, v0.8h
+; NOB16B16-NEXT:    bfmlalb v3.4s, v1.8h, v4.8h
+; NOB16B16-NEXT:    bfmlalt v2.4s, v1.8h, v4.8h
+; NOB16B16-NEXT:    bfcvt z0.h, p0/m, z3.s
+; NOB16B16-NEXT:    bfcvtnt z0.h, p0/m, z2.s
+; NOB16B16-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; NOB16B16-NEXT:    ret
 ;
 ; B16B16-LABEL: fadd_v8bf16:
@@ -913,14 +915,17 @@ define <4 x bfloat> @fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
 define <8 x bfloat> @fsub_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
 ; NOB16B16-LABEL: fsub_v8bf16:
 ; NOB16B16:       // %bb.0:
-; NOB16B16-NEXT:    shll v2.4s, v1.4h, #16
-; NOB16B16-NEXT:    shll v3.4s, v0.4h, #16
-; NOB16B16-NEXT:    shll2 v1.4s, v1.8h, #16
-; NOB16B16-NEXT:    shll2 v0.4s, v0.8h, #16
-; NOB16B16-NEXT:    fsub v2.4s, v3.4s, v2.4s
-; NOB16B16-NEXT:    fsub v1.4s, v0.4s, v1.4s
-; NOB16B16-NEXT:    bfcvtn v0.4h, v2.4s
-; NOB16B16-NEXT:    bfcvtn2 v0.8h, v1.4s
+; NOB16B16-NEXT:    movi v2.2d, #0000000000000000
+; NOB16B16-NEXT:    mov w8, #49024 // =0xbf80
+; NOB16B16-NEXT:    ptrue p0.s, vl4
+; NOB16B16-NEXT:    dup v4.8h, w8
+; NOB16B16-NEXT:    trn1 v3.8h, v2.8h, v0.8h
+; NOB16B16-NEXT:    trn2 v2.8h, v2.8h, v0.8h
+; NOB16B16-NEXT:    bfmlalb v3.4s, v1.8h, v4.8h
+; NOB16B16-NEXT:    bfmlalt v2.4s, v1.8h, v4.8h
+; NOB16B16-NEXT:    bfcvt z0.h, p0/m, z3.s
+; NOB16B16-NEXT:    bfcvtnt z0.h, p0/m, z2.s
+; NOB16B16-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; NOB16B16-NEXT:    ret
 ;
 ; B16B16-LABEL: fsub_v8bf16:
diff --git a/llvm/test/CodeGen/AArch64/sve-bf16-arith.ll b/llvm/test/CodeGen/AArch64/sve-bf16-arith.ll
index 6917ac12999bf..89d2f5485fd38 100644
--- a/llvm/test/CodeGen/AArch64/sve-bf16-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-bf16-arith.ll
@@ -66,10 +66,10 @@ define <vscale x 2 x bfloat> @fadd_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x
 define <vscale x 4 x bfloat> @fadd_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
 ; NOB16B16-LABEL: fadd_nxv4bf16:
 ; NOB16B16:       // %bb.0:
-; NOB16B16-NEXT:    lsl z1.s, z1.s, #16
+; NOB16B16-NEXT:    fmov z2.h, #1.87500000
 ; NOB16B16-NEXT:    lsl z0.s, z0.s, #16
 ; NOB16B16-NEXT:    ptrue p0.s
-; NOB16B16-NEXT:    fadd z0.s, z0.s, z1.s
+; NOB16B16-NEXT:    bfmlalb z0.s, z1.h, z2.h
 ; NOB16B16-NEXT:    bfcvt z0.h, p0/m, z0.s
 ; NOB16B16-NEXT:    ret
 ;
@@ -83,28 +83,36 @@ define <vscale x 4 x bfloat> @fadd_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x
 }
 
 define <vscale x 8 x bfloat> @fadd_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
-; NOB16B16-LABEL: fadd_nxv8bf16:
-; NOB16B16:       // %bb.0:
-; NOB16B16-NEXT:    uunpkhi z2.s, z1.h
-; NOB16B16-NEXT:    uunpkhi z3.s, z0.h
-; NOB16B16-NEXT:    uunpklo z1.s, z1.h
-; NOB16B16-NEXT:    uunpklo z0.s, z0.h
-; NOB16B16-NEXT:    ptrue p0.s
-; NOB16B16-NEXT:    lsl z2.s, z2.s, #16
-; NOB16B16-NEXT:    lsl z3.s, z3.s, #16
-; NOB16B16-NEXT:    lsl z1.s, z1.s, #16
-; NOB16B16-NEXT:    lsl z0.s, z0.s, #16
-; NOB16B16-NEXT:    fadd z2.s, z3.s, z2.s
-; NOB16B16-NEXT:    fadd z0.s, z0.s, z1.s
-; NOB16B16-NEXT:    bfcvt z1.h, p0/m, z2.s
-; NOB16B16-NEXT:    bfcvt z0.h, p0/m, z0.s
-; NOB16B16-NEXT:    uzp1 z0.h, z0.h, z1.h
-; NOB16B16-NEXT:    ret
+; NOB16B16-NONSTREAMING-LABEL: fadd_nxv8bf16:
+; NOB16B16-NONSTREAMING:       // %bb.0:
+; NOB16B16-NONSTREAMING-NEXT:    movi v2.2d, #0000000000000000
+; NOB16B16-NONSTREAMING-NEXT:    fmov z3.h, #1.87500000
+; NOB16B16-NONSTREAMING-NEXT:    ptrue p0.s
+; NOB16B16-NONSTREAMING-NEXT:    trn1 z4.h, z2.h, z0.h
+; NOB16B16-NONSTREAMING-NEXT:    trn2 z2.h, z2.h, z0.h
+; NOB16B16-NONSTREAMING-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; NOB16B16-NONSTREAMING-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; NOB16B16-NONSTREAMING-NEXT:    bfcvt z0.h, p0/m, z4.s
+; NOB16B16-NONSTREAMING-NEXT:    bfcvtnt z0.h, p0/m, z2.s
+; NOB16B16-NONSTREAMING-NEXT:    ret
 ;
 ; B16B16-LABEL: fadd_nxv8bf16:
 ; B16B16:       // %bb.0:
 ; B16B16-NEXT:    bfadd z0.h, z0.h, z1.h
 ; B16B16-NEXT:    ret
+;
+; NOB16B16-STREAMING-LABEL: fadd_nxv8bf16:
+; NOB16B16-STREAMING:       // %bb.0:
+; NOB16B16-STREAMING-NEXT:    mov z2.h, #0 // =0x0
+; NOB16B16-STREAMING-NEXT:    fmov z3.h, #1.87500000
+; NOB16B16-STREAMING-NEXT:    ptrue p0.s
+; NOB16B16-STREAMING-NEXT:    trn1 z4.h, z2.h, z0.h
+; NOB16B16-STREAMING-NEXT:    trn2 z2.h, z2.h, z0.h
+; NOB16B16-STREAMING-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; NOB16B16-STREAMING-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; NOB16B16-STREAMING-NEXT:    bfcvt z0.h, p0/m, z4.s
+; NOB16B16-STREAMING-NEXT:    bfcvtnt z0.h, p0/m, z2.s
+; NOB16B16-STREAMING-NEXT:    ret
   %res = fadd <vscale x 8 x bfloat> %a, %b
   ret <vscale x 8 x bfloat> %res
 }
@@ -717,10 +725,10 @@ define <vscale x 2 x bfloat> @fsub_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x
 define <vscale x 4 x bfloat> @fsub_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
 ; NOB16B16-LABEL: fsub_nxv4bf16:
 ; NOB16B16:       // %bb.0:
-; NOB16B16-NEXT:    lsl z1.s, z1.s, #16
+; NOB16B16-NEXT:    fmov z2.h, #-1.87500000
 ; NOB16B16-NEXT:    lsl z0.s, z0.s, #16
 ; NOB16B16-NEXT:    ptrue p0.s
-; NOB16B16-NEXT:    fsub z0.s, z0.s, z1.s
+; NOB16B16-NEXT:    bfmlalb z0.s, z1.h, z2.h
 ; NOB16B16-NEXT:    bfcvt z0.h, p0/m, z0.s
 ; NOB16B16-NEXT:    ret
 ;
@@ -734,28 +742,36 @@ define <vscale x 4 x bfloat> @fsub_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x
 }
 
 define <vscale x 8 x bfloat> @fsub_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
-; NOB16B16-LABEL: fsub_nxv8bf16:
-; NOB16B16:       // %bb.0:
-; NOB16B16-NEXT:    uunpkhi z2.s, z1.h
-; NOB16B16-NEXT:    uunpkhi z3.s, z0.h
-; NOB16B16-NEXT:    uunpklo z1.s, z1.h
-; NOB16B16-NEXT:    uunpklo z0.s, z0.h
-; NOB16B16-NEXT:    ptrue p0.s
-; NOB16B16-NEXT:    lsl z2.s, z2.s, #16
-; NOB16B16-NEXT:    lsl z3.s, z3.s, #16
-; NOB16B16-NEXT:    lsl z1.s, z1.s, #16
-; NOB16B16-NEXT:    lsl z0.s, z0.s, #16
-; NOB16B16-NEXT:    fsub z2.s, z3.s, z2.s
-; NOB16B16-NEXT:    fsub z0.s, z0.s, z1.s
-; NOB16B16-NEXT:    bfcvt z1.h, p0/m, z2.s
-; NOB16B16-NEXT:    bfcvt z0.h, p0/m, z0.s
-; NOB16B16-NEXT:    uzp1 z0.h, z0.h, z1.h
-; NOB16B16-NEXT:    ret
+; NOB16B16-NONSTREAMING-LABEL: fsub_nxv8bf16:
+; NOB16B16-NONSTREAMING:       // %bb.0:
+; NOB16B16-NONSTREAMING-NEXT:    movi v2.2d, #0000000000000000
+; NOB16B16-NONSTREAMING-NEXT:    fmov z3.h, #-1.87500000
+; NOB16B16-NONSTREAMING-NEXT:    ptrue p0.s
+; NOB16B16-NONSTREAMING-NEXT:    trn1 z4.h, z2.h, z0.h
+; NOB16B16-NONSTREAMING-NEXT:    trn2 z2.h, z2.h, z0.h
+; NOB16B16-NONSTREAMING-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; NOB16B16-NONSTREAMING-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; NOB16B16-NONSTREAMING-NEXT:    bfcvt z0.h, p0/m, z4.s
+; NOB16B16-NONSTREAMING-NEXT:    bfcvtnt z0.h, p0/m, z2.s
+; NOB16B16-NONSTREAMING-NEXT:    ret
 ;
 ; B16B16-LABEL: fsub_nxv8bf16:
 ; B16B16:       // %bb.0:
 ; B16B16-NEXT:    bfsub z0.h, z0.h, z1.h
 ; B16B16-NEXT:    ret
+;
+; NOB16B16-STREAMING-LABEL: fsub_nxv8bf16:
+; NOB16B16-STREAMING:       // %bb.0:
+; NOB16B16-STREAMING-NEXT:    mov z2.h, #0 // =0x0
+; NOB16B16-STREAMING-NEXT:    fmov z3.h, #-1.87500000
+; NOB16B16-STREAMING-NEXT:    ptrue p0.s
+; NOB16B16-STREAMING-NEXT:    trn1 z4.h, z2.h, z0.h
+; NOB16B16-STREAMING-NEXT:    trn2 z2.h, z2.h, z0.h
+; NOB16B16-STREAMING-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; NOB16B16-STREAMING-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; NOB16B16-STREAMING-NEXT:    bfcvt z0.h, p0/m, z4.s
+; NOB16B16-STREAMING-NEXT:    bfcvtnt z0.h, p0/m, z2.s
+; NOB16B16-STREAMING-NEXT:    ret
   %res = fsub <vscale x 8 x bfloat> %a, %b
   ret <vscale x 8 x bfloat> %res
 }
diff --git a/llvm/test/CodeGen/AArch64/sve-bf16-combines.ll b/llvm/test/CodeGen/AArch64/sve-bf16-combines.ll
index a8049806a679b..64fea57ffe66a 100644
--- a/llvm/test/CodeGen/AArch64/sve-bf16-combines.ll
+++ b/llvm/test/CodeGen/AArch64/sve-bf16-combines.ll
@@ -292,20 +292,15 @@ define <vscale x 2 x bfloat> @fmls_sel_nxv2bf16(<vscale x 2 x i1> %pred, <vscale
 define <vscale x 8 x bfloat> @fadd_sel_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %mask) {
 ; SVE-LABEL: fadd_sel_nxv8bf16:
 ; SVE:       // %bb.0:
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpkhi z3.s, z0.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    uunpklo z4.s, z0.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
+; SVE-NEXT:    fmov z3.h, #1.87500000
 ; SVE-NEXT:    ptrue p1.s
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    fadd z2.s, z3.s, z2.s
-; SVE-NEXT:    fadd z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;
@@ -321,20 +316,15 @@ define <vscale x 8 x bfloat> @fadd_sel_nxv8bf16(<vscale x 8 x bfloat> %a, <vscal
 define <vscale x 8 x bfloat> @fsub_sel_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %mask) {
 ; SVE-LABEL: fsub_sel_nxv8bf16:
 ; SVE:       // %bb.0:
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpkhi z3.s, z0.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    uunpklo z4.s, z0.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
+; SVE-NEXT:    fmov z3.h, #-1.87500000
 ; SVE-NEXT:    ptrue p1.s
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    fsub z2.s, z3.s, z2.s
-; SVE-NEXT:    fsub z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;
@@ -350,20 +340,15 @@ define <vscale x 8 x bfloat> @fsub_sel_nxv8bf16(<vscale x 8 x bfloat> %a, <vscal
 define <vscale x 8 x bfloat> @fadd_sel_negzero_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %mask) {
 ; SVE-LABEL: fadd_sel_negzero_nxv8bf16:
 ; SVE:       // %bb.0:
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpkhi z3.s, z0.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    uunpklo z4.s, z0.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
+; SVE-NEXT:    fmov z3.h, #1.87500000
 ; SVE-NEXT:    ptrue p1.s
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    fadd z2.s, z3.s, z2.s
-; SVE-NEXT:    fadd z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;
@@ -380,20 +365,15 @@ define <vscale x 8 x bfloat> @fadd_sel_negzero_nxv8bf16(<vscale x 8 x bfloat> %a
 define <vscale x 8 x bfloat> @fsub_sel_negzero_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %mask) {
 ; SVE-LABEL: fsub_sel_negzero_nxv8bf16:
 ; SVE:       // %bb.0:
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpkhi z3.s, z0.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    uunpklo z4.s, z0.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
+; SVE-NEXT:    fmov z3.h, #-1.87500000
 ; SVE-NEXT:    ptrue p1.s
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    fsub z2.s, z3.s, z2.s
-; SVE-NEXT:    fsub z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;
@@ -417,21 +397,15 @@ define <vscale x 8 x bfloat> @fadd_sel_fmul_nxv8bf16(<vscale x 8 x bfloat> %a, <
 ; SVE-NEXT:    bfmlalt z4.s, z1.h, z2.h
 ; SVE-NEXT:    movi v2.2d, #0000000000000000
 ; SVE-NEXT:    bfcvt z1.h, p1/m, z3.s
-; SVE-NEXT:    uunpkhi z3.s, z0.h
-; SVE-NEXT:    uunpklo z0.s, z0.h
+; SVE-NEXT:    fmov z3.h, #1.87500000
 ; SVE-NEXT:    bfcvtnt z1.h, p1/m, z4.s
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    lsl z0.s, z0.s, #16
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
 ; SVE-NEXT:    sel z1.h, p0, z1.h, z2.h
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    fadd z2.s, z3.s, z2.s
-; SVE-NEXT:    fadd z0.s, z0.s, z1.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z0.h, p1/m, z0.s
-; SVE-NEXT:    uzp1 z0.h, z0.h, z1.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z0.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z0.h, p1/m, z2.s
 ; SVE-NEXT:    ret
 ;
 ; SVE-B16B16-LABEL: fadd_sel_fmul_nxv8bf16:
@@ -455,21 +429,16 @@ define <vscale x 8 x bfloat> @fsub_sel_fmul_nxv8bf16(<vscale x 8 x bfloat> %a, <
 ; SVE-NEXT:    ptrue p1.s
 ; SVE-NEXT:    bfmlalb z3.s, z1.h, z2.h
 ; SVE-NEXT:    bfmlalt z4.s, z1.h, z2.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
 ; SVE-NEXT:    bfcvt z1.h, p1/m, z3.s
-; SVE-NEXT:    uunpkhi z3.s, z0.h
+; SVE-NEXT:    fmov z3.h, #-1.87500000
 ; SVE-NEXT:    bfcvtnt z1.h, p1/m, z4.s
-; SVE-NEXT:    uunpklo z4.s, z0.h
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    fsub z2.s, z3.s, z2.s
-; SVE-NEXT:    fsub z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;
@@ -491,21 +460,16 @@ define <vscale x 8 x bfloat> @fadd_sel_fmul_nsz_nxv8bf16(<vscale x 8 x bfloat> %
 ; SVE-NEXT:    ptrue p1.s
 ; SVE-NEXT:    bfmlalb z3.s, z1.h, z2.h
 ; SVE-NEXT:    bfmlalt z4.s, z1.h, z2.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
 ; SVE-NEXT:    bfcvt z1.h, p1/m, z3.s
-; SVE-NEXT:    uunpkhi z3.s, z0.h
+; SVE-NEXT:    fmov z3.h, #1.87500000
 ; SVE-NEXT:    bfcvtnt z1.h, p1/m, z4.s
-; SVE-NEXT:    uunpklo z4.s, z0.h
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    fadd z2.s, z3.s, z2.s
-; SVE-NEXT:    fadd z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;
@@ -527,21 +491,16 @@ define <vscale x 8 x bfloat> @fsub_sel_fmul_nsz_nxv8bf16(<vscale x 8 x bfloat> %
 ; SVE-NEXT:    ptrue p1.s
 ; SVE-NEXT:    bfmlalb z3.s, z1.h, z2.h
 ; SVE-NEXT:    bfmlalt z4.s, z1.h, z2.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
 ; SVE-NEXT:    bfcvt z1.h, p1/m, z3.s
-; SVE-NEXT:    uunpkhi z3.s, z0.h
+; SVE-NEXT:    fmov z3.h, #-1.87500000
 ; SVE-NEXT:    bfcvtnt z1.h, p1/m, z4.s
-; SVE-NEXT:    uunpklo z4.s, z0.h
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    fsub z2.s, z3.s, z2.s
-; SVE-NEXT:    fsub z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;
@@ -563,21 +522,16 @@ define <vscale x 8 x bfloat> @fadd_sel_fmul_negzero_nxv8bf16(<vscale x 8 x bfloa
 ; SVE-NEXT:    ptrue p1.s
 ; SVE-NEXT:    bfmlalb z3.s, z1.h, z2.h
 ; SVE-NEXT:    bfmlalt z4.s, z1.h, z2.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
 ; SVE-NEXT:    bfcvt z1.h, p1/m, z3.s
-; SVE-NEXT:    uunpkhi z3.s, z0.h
+; SVE-NEXT:    fmov z3.h, #1.87500000
 ; SVE-NEXT:    bfcvtnt z1.h, p1/m, z4.s
-; SVE-NEXT:    uunpklo z4.s, z0.h
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    fadd z2.s, z3.s, z2.s
-; SVE-NEXT:    fadd z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;
@@ -602,21 +556,16 @@ define <vscale x 8 x bfloat> @fsub_sel_fmul_negzero_nxv8bf16(<vscale x 8 x bfloa
 ; SVE-NEXT:    bfmlalt z4.s, z1.h, z2.h
 ; SVE-NEXT:    dupm z2.h, #0x8000
 ; SVE-NEXT:    bfcvt z1.h, p1/m, z3.s
-; SVE-NEXT:    uunpkhi z3.s, z0.h
-; SVE-NEXT:    uunpklo z0.s, z0.h
+; SVE-NEXT:    movi v3.2d, #0000000000000000
 ; SVE-NEXT:    bfcvtnt z1.h, p1/m, z4.s
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    lsl z0.s, z0.s, #16
+; SVE-NEXT:    trn1 z4.h, z3.h, z0.h
+; SVE-NEXT:    trn2 z3.h, z3.h, z0.h
 ; SVE-NEXT:    sel z1.h, p0, z1.h, z2.h
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    fsub z2.s, z3.s, z2.s
-; SVE-NEXT:    fsub z0.s, z0.s, z1.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z0.h, p1/m, z0.s
-; SVE-NEXT:    uzp1 z0.h, z0.h, z1.h
+; SVE-NEXT:    fmov z2.h, #-1.87500000
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z2.h
+; SVE-NEXT:    bfmlalt z3.s, z1.h, z2.h
+; SVE-NEXT:    bfcvt z0.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z0.h, p1/m, z3.s
 ; SVE-NEXT:    ret
 ;
 ; SVE-B16B16-LABEL: fsub_sel_fmul_negzero_nxv8bf16:
@@ -641,21 +590,16 @@ define <vscale x 8 x bfloat> @fadd_sel_fmul_negzero_nsz_nxv8bf16(<vscale x 8 x b
 ; SVE-NEXT:    ptrue p1.s
 ; SVE-NEXT:    bfmlalb z3.s, z1.h, z2.h
 ; SVE-NEXT:    bfmlalt z4.s, z1.h, z2.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
 ; SVE-NEXT:    bfcvt z1.h, p1/m, z3.s
-; SVE-NEXT:    uunpkhi z3.s, z0.h
+; SVE-NEXT:    fmov z3.h, #1.87500000
 ; SVE-NEXT:    bfcvtnt z1.h, p1/m, z4.s
-; SVE-NEXT:    uunpklo z4.s, z0.h
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    fadd z2.s, z3.s, z2.s
-; SVE-NEXT:    fadd z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;
@@ -678,21 +622,16 @@ define <vscale x 8 x bfloat> @fsub_sel_fmul_negzero_nsz_nxv8bf16(<vscale x 8 x b
 ; SVE-NEXT:    ptrue p1.s
 ; SVE-NEXT:    bfmlalb z3.s, z1.h, z2.h
 ; SVE-NEXT:    bfmlalt z4.s, z1.h, z2.h
+; SVE-NEXT:    movi v2.2d, #0000000000000000
 ; SVE-NEXT:    bfcvt z1.h, p1/m, z3.s
-; SVE-NEXT:    uunpkhi z3.s, z0.h
+; SVE-NEXT:    fmov z3.h, #-1.87500000
 ; SVE-NEXT:    bfcvtnt z1.h, p1/m, z4.s
-; SVE-NEXT:    uunpklo z4.s, z0.h
-; SVE-NEXT:    lsl z3.s, z3.s, #16
-; SVE-NEXT:    uunpkhi z2.s, z1.h
-; SVE-NEXT:    uunpklo z1.s, z1.h
-; SVE-NEXT:    lsl z4.s, z4.s, #16
-; SVE-NEXT:    lsl z2.s, z2.s, #16
-; SVE-NEXT:    lsl z1.s, z1.s, #16
-; SVE-NEXT:    fsub z2.s, z3.s, z2.s
-; SVE-NEXT:    fsub z1.s, z4.s, z1.s
-; SVE-NEXT:    bfcvt z2.h, p1/m, z2.s
-; SVE-NEXT:    bfcvt z1.h, p1/m, z1.s
-; SVE-NEXT:    uzp1 z1.h, z1.h, z2.h
+; SVE-NEXT:    trn1 z4.h, z2.h, z0.h
+; SVE-NEXT:    trn2 z2.h, z2.h, z0.h
+; SVE-NEXT:    bfmlalb z4.s, z1.h, z3.h
+; SVE-NEXT:    bfmlalt z2.s, z1.h, z3.h
+; SVE-NEXT:    bfcvt z1.h, p1/m, z4.s
+; SVE-NEXT:    bfcvtnt z1.h, p1/m, z2.s
 ; SVE-NEXT:    mov z0.h, p0/m, z1.h
 ; SVE-NEXT:    ret
 ;



More information about the llvm-commits mailing list