[llvm] [LLVM][CodeGen][SVE] Use unpredicated instructions when there's no strictfp requirement. (PR #88166)

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 9 10:44:30 PDT 2024


https://github.com/paulwalker-arm created https://github.com/llvm/llvm-project/pull/88166

SVE has unpredicated FADD, FMUL and FSUB instructions that have no
register restrictions. We already use these for all-true predicates,
but we can also use them whenever we are allowed to introduce new
trapping behaviour, i.e. when the function is not marked strictfp.
    
Fixes https://github.com/llvm/llvm-project/issues/56572
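
To make the change concrete, here is a sketch distilled from the tests
below (the function names are illustrative): the same fadd.u intrinsic
lowers to the unpredicated form by default, but keeps the predicated
form under strictfp.

define <vscale x 4 x float> @relaxed(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
  ; Inactive lanes of a .u intrinsic are undefined and, without strictfp,
  ; new FP traps are permitted, so every lane may be computed.
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %r
}
; before: fadd z0.s, p0/m, z0.s, z1.s  (predicated, destination tied to z0)
; after:  fadd z0.s, z0.s, z1.s        (unpredicated, no register restriction)

define <vscale x 4 x float> @strict(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) strictfp {
  ; Under strictfp we must not introduce FP exceptions on inactive lanes,
  ; so the predicated form is kept.
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.u.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %r
}
; before and after: fadd z0.s, p0/m, z0.s, z1.s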

From fc356efef387bad877d1129978f9b3010bbf9822 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Tue, 9 Apr 2024 16:54:51 +0000
Subject: [PATCH 1/2] Add tests for strict fp add, mul and sub.

---
 .../AArch64/sve-intrinsics-fp-arith-undef.ll  | 148 ++++++++++++++++++
 .../AArch64/sve2p1-intrinsics-bfadd.ll        |   9 ++
 .../AArch64/sve2p1-intrinsics-bfmul.ll        |   9 ++
 .../AArch64/sve2p1-intrinsics-bfsub.ll        |   9 ++
 4 files changed, 175 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-undef.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-undef.ll
index a77bcdf38643df..7d5e22882cf701 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-undef.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-undef.ll
@@ -75,6 +75,43 @@ define <vscale x 2 x double> @fadd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
   ret <vscale x 2 x double> %out
 }
 
+;
+; FADD (strictfp)
+;
+
+define <vscale x 8 x half> @fadd_h_strictfp(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) strictfp {
+; CHECK-LABEL: fadd_h_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.u.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                   <vscale x 8 x half> %a,
+                                                                   <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x float> @fadd_s_strictfp(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) strictfp {
+; CHECK-LABEL: fadd_s_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.u.nxv4f32(<vscale x 4 x i1> %pg,
+                                                                    <vscale x 4 x float> %a,
+                                                                    <vscale x 4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @fadd_d_strictfp(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) strictfp {
+; CHECK-LABEL: fadd_d_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.u.nxv2f64(<vscale x 2 x i1> %pg,
+                                                                     <vscale x 2 x double> %a,
+                                                                     <vscale x 2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
 ;
 ; FDIV
 ;
@@ -494,6 +531,43 @@ define <vscale x 2 x double> @fmul_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
   ret <vscale x 2 x double> %out
 }
 
+;
+; FMUL (strictfp)
+;
+
+define <vscale x 8 x half> @fmul_h_strictfp(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) strictfp {
+; CHECK-LABEL: fmul_h_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                   <vscale x 8 x half> %a,
+                                                                   <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x float> @fmul_s_strictfp(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) strictfp {
+; CHECK-LABEL: fmul_s_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1> %pg,
+                                                                    <vscale x 4 x float> %a,
+                                                                    <vscale x 4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @fmul_d_strictfp(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) strictfp {
+; CHECK-LABEL: fmul_d_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> %pg,
+                                                                     <vscale x 2 x double> %a,
+                                                                     <vscale x 2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
 ;
 ; FMULX
 ;
@@ -728,6 +802,43 @@ define <vscale x 2 x double> @fsub_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
   ret <vscale x 2 x double> %out
 }
 
+;
+; FSUB (strictfp)
+;
+
+define <vscale x 8 x half> @fsub_h_strictfp(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) strictfp {
+; CHECK-LABEL: fsub_h_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                   <vscale x 8 x half> %a,
+                                                                   <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x float> @fsub_s_strictfp(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) strictfp {
+; CHECK-LABEL: fsub_s_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1> %pg,
+                                                                    <vscale x 4 x float> %a,
+                                                                    <vscale x 4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @fsub_d_strictfp(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) strictfp {
+; CHECK-LABEL: fsub_d_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> %pg,
+                                                                     <vscale x 2 x double> %a,
+                                                                     <vscale x 2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
 ;
 ; FSUBR
 ;
@@ -765,6 +876,43 @@ define <vscale x 2 x double> @fsubr_d(<vscale x 2 x i1> %pg, <vscale x 2 x doubl
   ret <vscale x 2 x double> %out
 }
 
+;
+; FSUBR (strictfp)
+;
+
+define <vscale x 8 x half> @fsubr_h_strictfp(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) strictfp {
+; CHECK-LABEL: fsubr_h_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1> %pg,
+                                                                   <vscale x 8 x half> %b,
+                                                                   <vscale x 8 x half> %a)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x float> @fsubr_s_strictfp(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) strictfp {
+; CHECK-LABEL: fsubr_s_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1> %pg,
+                                                                    <vscale x 4 x float> %b,
+                                                                    <vscale x 4 x float> %a)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @fsubr_d_strictfp(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) strictfp {
+; CHECK-LABEL: fsubr_d_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> %pg,
+                                                                     <vscale x 2 x double> %b,
+                                                                     <vscale x 2 x double> %a)
+  ret <vscale x 2 x double> %out
+}
+
 declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.u.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
 declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.u.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.u.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
index 221bb3b6045fb2..b2bc8feb3420f2 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
@@ -33,6 +33,15 @@ define <vscale x 8 x bfloat> @bfadd_u(<vscale x 8 x i1> %pg, <vscale x 8 x bfloa
   ret <vscale x 8 x bfloat> %res
 }
 
+define <vscale x 8 x bfloat> @bfadd_u_strictfp(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) strictfp {
+; CHECK-LABEL: bfadd_u_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bfadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %res
+}
+
 define <vscale x 8 x bfloat> @bfadd_u_ptrue(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
 ; CHECK-LABEL: bfadd_u_ptrue:
 ; CHECK:       // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
index a04c5a52139cdf..c4c78b3280bb60 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
@@ -33,6 +33,15 @@ define <vscale x 8 x bfloat> @bfmul_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x
   ret <vscale x 8 x bfloat> %res
 }
 
+define <vscale x 8 x bfloat> @bfmul_u_pred_strictfp(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) strictfp {
+; CHECK-LABEL: bfmul_u_pred_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bfmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %res
+}
+
 define <vscale x 8 x bfloat> @bfmul_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
 ; CHECK-LABEL: bfmul_u:
 ; CHECK:       // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
index 752b5ae9df6307..f3a4b039492d09 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
@@ -33,6 +33,15 @@ define <vscale x 8 x bfloat> @bfsub_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x
   ret <vscale x 8 x bfloat> %res
 }
 
+define <vscale x 8 x bfloat> @bfsub_u_pred_strictfp(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) strictfp {
+; CHECK-LABEL: bfsub_u_pred_strictfp:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bfsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %res
+}
+
 define <vscale x 8 x bfloat> @bfsub_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
 ; CHECK-LABEL: bfsub_u:
 ; CHECK:       // %bb.0:

From 267a582204b7be60835b46d1b3c569d4578652e0 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Mon, 4 Mar 2024 18:39:15 +0000
Subject: [PATCH 2/2] [LLVM][CodeGen][SVE] Use unpredicated instructions when
 there's no strictfp requirement.

SVE has unpredicated FADD, FMUL and FSUB instructions that have no
register restrictions. We already use these for all-true predicates,
but we can also use them whenever we are allowed to introduce new
trapping behaviour, i.e. when the function is not marked strictfp.

Fixes https://github.com/llvm/llvm-project/issues/56572
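
A related knock-on effect, sketched with an illustrative function name:
once the unpredicated form is available, a reversed subtraction no
longer needs the tied-destination FSUBR; the unpredicated FSUB simply
takes its operands in swapped order.

define <vscale x 8 x half> @fsubr_example(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
  ; Computes %b - %a, i.e. the intrinsic's operands arrive reversed.
  %r = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %b, <vscale x 8 x half> %a)
  ret <vscale x 8 x half> %r
}
; before: fsubr z0.h, p0/m, z0.h, z1.h
; after:  fsub z0.h, z1.h, z0.h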
---
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |  26 +++-
 .../sme-streaming-compatible-interface.ll     |   3 +-
 .../AArch64/sve-fixed-length-fp-arith.ll      | 135 ++++++++----------
 .../AArch64/sve-fixed-length-fp-reduce.ll     |   6 +-
 .../sve-fixed-length-permute-zip-uzp-trn.ll   |   6 +-
 .../AArch64/sve-intrinsics-fp-arith-undef.ll  |  26 ++--
 ...ve-streaming-mode-fixed-length-fp-arith.ll | 108 +++++---------
 ...-streaming-mode-fixed-length-fp-convert.ll |   8 +-
 ...e-streaming-mode-fixed-length-fp-reduce.ll |  12 +-
 ...eaming-mode-fixed-length-optimize-ptrue.ll |  36 ++---
 ...g-mode-fixed-length-permute-zip-uzp-trn.ll |  25 ++--
 .../AArch64/sve2p1-intrinsics-bfadd.ll        |   4 +-
 .../AArch64/sve2p1-intrinsics-bfmul.ll        |   4 +-
 .../AArch64/sve2p1-intrinsics-bfsub.ll        |   4 +-
 14 files changed, 179 insertions(+), 224 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index a519d81362a73a..7884fc10fdff65 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -213,14 +213,31 @@ def AArch64fadd_p_contract : PatFrag<(ops node:$op1, node:$op2, node:$op3),
                                      (AArch64fadd_p node:$op1, node:$op2, node:$op3), [{
   return N->getFlags().hasAllowContract();
 }]>;
+def AArch64fadd_p_nostrictfp : PatFrag<(ops node:$op1, node:$op2, node:$op3),
+                                       (AArch64fadd_p node:$op1, node:$op2, node:$op3), [{
+    // We're allowed to introduce new trapping behaviour.
+    return !MF->getFunction().getAttributes().hasFnAttr(llvm::Attribute::StrictFP);
+}]>;
 def AArch64fadd_p_nsz : PatFrag<(ops node:$op1, node:$op2, node:$op3),
                                 (AArch64fadd_p node:$op1, node:$op2, node:$op3), [{
   return N->getFlags().hasNoSignedZeros();
 }]>;
+
+def AArch64fmul_p_nostrictfp : PatFrag<(ops node:$op1, node:$op2, node:$op3),
+                                       (AArch64fmul_p node:$op1, node:$op2, node:$op3), [{
+    // We're allowed to introduce new trapping behaviour.
+    return !MF->getFunction().getAttributes().hasFnAttr(llvm::Attribute::StrictFP);
+}]>;
+
 def AArch64fsub_p_contract : PatFrag<(ops node:$op1, node:$op2, node:$op3),
                                      (AArch64fsub_p node:$op1, node:$op2, node:$op3), [{
   return N->getFlags().hasAllowContract();
 }]>;
+def AArch64fsub_p_nostrictfp : PatFrag<(ops node:$op1, node:$op2, node:$op3),
+                                       (AArch64fsub_p node:$op1, node:$op2, node:$op3), [{
+    // We're allowed to introduce new trapping behaviour.
+    return !MF->getFunction().getAttributes().hasFnAttr(llvm::Attribute::StrictFP);
+}]>;
 def AArch64fsub_p_nsz : PatFrag<(ops node:$op1, node:$op2, node:$op3),
                                 (AArch64fsub_p node:$op1, node:$op2, node:$op3), [{
   return N->getFlags().hasNoSignedZeros();
@@ -489,15 +506,18 @@ def AArch64fmax_m1 : VSelectCommPredOrPassthruPatFrags<int_aarch64_sve_fmax, AAr
 
 def AArch64fadd : PatFrags<(ops node:$op1, node:$op2),
                             [(fadd node:$op1, node:$op2),
-                             (AArch64fadd_p (SVEAllActive), node:$op1, node:$op2)]>;
+                             (AArch64fadd_p (SVEAllActive), node:$op1, node:$op2),
+                             (AArch64fadd_p_nostrictfp (SVEAnyPredicate), node:$op1, node:$op2)]>;
 
 def AArch64fmul : PatFrags<(ops node:$op1, node:$op2),
                             [(fmul node:$op1, node:$op2),
-                             (AArch64fmul_p (SVEAllActive), node:$op1, node:$op2)]>;
+                             (AArch64fmul_p (SVEAllActive), node:$op1, node:$op2),
+                             (AArch64fmul_p_nostrictfp (SVEAnyPredicate), node:$op1, node:$op2)]>;
 
 def AArch64fsub : PatFrags<(ops node:$op1, node:$op2),
                             [(fsub node:$op1, node:$op2),
-                             (AArch64fsub_p (SVEAllActive), node:$op1, node:$op2)]>;
+                             (AArch64fsub_p (SVEAllActive), node:$op1, node:$op2),
+                             (AArch64fsub_p_nostrictfp (SVEAnyPredicate), node:$op1, node:$op2)]>;
 
 def AArch64mul : PatFrag<(ops node:$op1, node:$op2),
                          (AArch64mul_p (SVEAnyPredicate), node:$op1, node:$op2)>;
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
index 6d2abf7e18419a..cfe1708055e7b7 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
@@ -151,12 +151,11 @@ define <2 x double> @streaming_compatible_with_neon_vectors(<2 x double> %arg) "
 ; CHECK-NEXT:  // %bb.3:
 ; CHECK-NEXT:    smstart sm
 ; CHECK-NEXT:  .LBB4_4:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    add x8, sp, #16
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr z1, [x8] // 16-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    addvl sp, sp, #1
 ; CHECK-NEXT:    add sp, sp, #16
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
index 64c4eea0ee5f36..d2d3ee52923396 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
@@ -35,7 +35,7 @@ define void @fadd_v16f16(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ptrue p0.h, vl16
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
@@ -54,11 +54,10 @@ define void @fadd_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT:    movprfx z1, z2
-; VBITS_GE_256-NEXT:    fadd z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    fadd z0.h, z0.h, z1.h
+; VBITS_GE_256-NEXT:    fadd z1.h, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fadd_v32f16:
@@ -66,7 +65,7 @@ define void @fadd_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
 ; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; VBITS_GE_512-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_512-NEXT:    fadd z0.h, z0.h, z1.h
 ; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
 ; VBITS_GE_512-NEXT:    ret
   %op1 = load <32 x half>, ptr %a
@@ -82,7 +81,7 @@ define void @fadd_v64f16(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.h, vl64
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <64 x half>, ptr %a
@@ -98,7 +97,7 @@ define void @fadd_v128f16(ptr %a, ptr %b)  vscale_range(16,0) #0 {
 ; CHECK-NEXT:    ptrue p0.h, vl128
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <128 x half>, ptr %a
@@ -134,7 +133,7 @@ define void @fadd_v8f32(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ptrue p0.s, vl8
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
@@ -153,11 +152,10 @@ define void @fadd_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT:    movprfx z1, z2
-; VBITS_GE_256-NEXT:    fadd z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    fadd z0.s, z0.s, z1.s
+; VBITS_GE_256-NEXT:    fadd z1.s, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fadd_v16f32:
@@ -165,7 +163,7 @@ define void @fadd_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
 ; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; VBITS_GE_512-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_512-NEXT:    fadd z0.s, z0.s, z1.s
 ; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
 ; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x float>, ptr %a
@@ -181,7 +179,7 @@ define void @fadd_v32f32(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.s, vl32
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x float>, ptr %a
@@ -197,7 +195,7 @@ define void @fadd_v64f32(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK-NEXT:    ptrue p0.s, vl64
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <64 x float>, ptr %a
@@ -233,7 +231,7 @@ define void @fadd_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a
@@ -252,11 +250,10 @@ define void @fadd_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT:    movprfx z1, z2
-; VBITS_GE_256-NEXT:    fadd z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    fadd z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    fadd z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fadd_v8f64:
@@ -264,7 +261,7 @@ define void @fadd_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
 ; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; VBITS_GE_512-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_512-NEXT:    fadd z0.d, z0.d, z1.d
 ; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x double>, ptr %a
@@ -280,7 +277,7 @@ define void @fadd_v16f64(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl16
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x double>, ptr %a
@@ -296,7 +293,7 @@ define void @fadd_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl32
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x double>, ptr %a
@@ -970,7 +967,7 @@ define void @fmul_v16f16(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ptrue p0.h, vl16
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
@@ -989,11 +986,10 @@ define void @fmul_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT:    movprfx z1, z2
-; VBITS_GE_256-NEXT:    fmul z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    fmul z0.h, z0.h, z1.h
+; VBITS_GE_256-NEXT:    fmul z1.h, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmul_v32f16:
@@ -1001,7 +997,7 @@ define void @fmul_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
 ; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; VBITS_GE_512-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_512-NEXT:    fmul z0.h, z0.h, z1.h
 ; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
 ; VBITS_GE_512-NEXT:    ret
   %op1 = load <32 x half>, ptr %a
@@ -1017,7 +1013,7 @@ define void @fmul_v64f16(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.h, vl64
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <64 x half>, ptr %a
@@ -1033,7 +1029,7 @@ define void @fmul_v128f16(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK-NEXT:    ptrue p0.h, vl128
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <128 x half>, ptr %a
@@ -1069,7 +1065,7 @@ define void @fmul_v8f32(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ptrue p0.s, vl8
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
@@ -1088,11 +1084,10 @@ define void @fmul_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT:    movprfx z1, z2
-; VBITS_GE_256-NEXT:    fmul z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    fmul z0.s, z0.s, z1.s
+; VBITS_GE_256-NEXT:    fmul z1.s, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmul_v16f32:
@@ -1100,7 +1095,7 @@ define void @fmul_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
 ; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; VBITS_GE_512-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_512-NEXT:    fmul z0.s, z0.s, z1.s
 ; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
 ; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x float>, ptr %a
@@ -1116,7 +1111,7 @@ define void @fmul_v32f32(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.s, vl32
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x float>, ptr %a
@@ -1132,7 +1127,7 @@ define void @fmul_v64f32(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK-NEXT:    ptrue p0.s, vl64
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <64 x float>, ptr %a
@@ -1168,7 +1163,7 @@ define void @fmul_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a
@@ -1187,11 +1182,10 @@ define void @fmul_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT:    movprfx z1, z2
-; VBITS_GE_256-NEXT:    fmul z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    fmul z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    fmul z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fmul_v8f64:
@@ -1199,7 +1193,7 @@ define void @fmul_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
 ; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; VBITS_GE_512-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_512-NEXT:    fmul z0.d, z0.d, z1.d
 ; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x double>, ptr %a
@@ -1215,7 +1209,7 @@ define void @fmul_v16f64(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl16
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x double>, ptr %a
@@ -1231,7 +1225,7 @@ define void @fmul_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl32
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x double>, ptr %a
@@ -1807,7 +1801,7 @@ define void @fsub_v16f16(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ptrue p0.h, vl16
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
@@ -1826,11 +1820,10 @@ define void @fsub_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT:    movprfx z1, z2
-; VBITS_GE_256-NEXT:    fsub z1.h, p0/m, z1.h, z3.h
-; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    fsub z0.h, z0.h, z1.h
+; VBITS_GE_256-NEXT:    fsub z1.h, z2.h, z3.h
 ; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fsub_v32f16:
@@ -1838,7 +1831,7 @@ define void @fsub_v32f16(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
 ; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; VBITS_GE_512-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; VBITS_GE_512-NEXT:    fsub z0.h, z0.h, z1.h
 ; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
 ; VBITS_GE_512-NEXT:    ret
   %op1 = load <32 x half>, ptr %a
@@ -1854,7 +1847,7 @@ define void @fsub_v64f16(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.h, vl64
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <64 x half>, ptr %a
@@ -1870,7 +1863,7 @@ define void @fsub_v128f16(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK-NEXT:    ptrue p0.h, vl128
 ; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <128 x half>, ptr %a
@@ -1906,7 +1899,7 @@ define void @fsub_v8f32(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ptrue p0.s, vl8
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
@@ -1925,11 +1918,10 @@ define void @fsub_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT:    movprfx z1, z2
-; VBITS_GE_256-NEXT:    fsub z1.s, p0/m, z1.s, z3.s
-; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    fsub z0.s, z0.s, z1.s
+; VBITS_GE_256-NEXT:    fsub z1.s, z2.s, z3.s
 ; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fsub_v16f32:
@@ -1937,7 +1929,7 @@ define void @fsub_v16f32(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
 ; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; VBITS_GE_512-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; VBITS_GE_512-NEXT:    fsub z0.s, z0.s, z1.s
 ; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
 ; VBITS_GE_512-NEXT:    ret
   %op1 = load <16 x float>, ptr %a
@@ -1953,7 +1945,7 @@ define void @fsub_v32f32(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.s, vl32
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x float>, ptr %a
@@ -1969,7 +1961,7 @@ define void @fsub_v64f32(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK-NEXT:    ptrue p0.s, vl64
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
-; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <64 x float>, ptr %a
@@ -2005,7 +1997,7 @@ define void @fsub_v4f64(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a
@@ -2024,11 +2016,10 @@ define void @fsub_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
-; VBITS_GE_256-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT:    movprfx z1, z2
-; VBITS_GE_256-NEXT:    fsub z1.d, p0/m, z1.d, z3.d
-; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    fsub z0.d, z0.d, z1.d
+; VBITS_GE_256-NEXT:    fsub z1.d, z2.d, z3.d
 ; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
 ; VBITS_GE_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: fsub_v8f64:
@@ -2036,7 +2027,7 @@ define void @fsub_v8f64(ptr %a, ptr %b) #0 {
 ; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
 ; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; VBITS_GE_512-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; VBITS_GE_512-NEXT:    fsub z0.d, z0.d, z1.d
 ; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_GE_512-NEXT:    ret
   %op1 = load <8 x double>, ptr %a
@@ -2052,7 +2043,7 @@ define void @fsub_v16f64(ptr %a, ptr %b) vscale_range(8,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl16
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x double>, ptr %a
@@ -2068,7 +2059,7 @@ define void @fsub_v32f64(ptr %a, ptr %b) vscale_range(16,0) #0 {
 ; CHECK-NEXT:    ptrue p0.d, vl32
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
-; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x double>, ptr %a
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
index 8053a401e5f45e..b0a0edce73926d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
@@ -343,7 +343,7 @@ define half @faddv_v32f16(half %start, ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    fadd z1.h, p0/m, z1.h, z2.h
+; VBITS_GE_256-NEXT:    fadd z1.h, z2.h, z1.h
 ; VBITS_GE_256-NEXT:    faddv h1, p0, z1.h
 ; VBITS_GE_256-NEXT:    fadd h0, h0, h1
 ; VBITS_GE_256-NEXT:    ret
@@ -430,7 +430,7 @@ define float @faddv_v16f32(float %start, ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    mov x8, #8 // =0x8
 ; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0, x8, lsl #2]
 ; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    fadd z1.s, p0/m, z1.s, z2.s
+; VBITS_GE_256-NEXT:    fadd z1.s, z2.s, z1.s
 ; VBITS_GE_256-NEXT:    faddv s1, p0, z1.s
 ; VBITS_GE_256-NEXT:    fadd s0, s0, s1
 ; VBITS_GE_256-NEXT:    ret
@@ -514,7 +514,7 @@ define double @faddv_v8f64(double %start, ptr %a) #0 {
 ; VBITS_GE_256-NEXT:    mov x8, #4 // =0x4
 ; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0, x8, lsl #3]
 ; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x0]
-; VBITS_GE_256-NEXT:    fadd z1.d, p0/m, z1.d, z2.d
+; VBITS_GE_256-NEXT:    fadd z1.d, z2.d, z1.d
 ; VBITS_GE_256-NEXT:    faddv d1, p0, z1.d
 ; VBITS_GE_256-NEXT:    fadd d0, d0, d1
 ; VBITS_GE_256-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
index e07645c27df728..1788f39f9de849 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
@@ -150,7 +150,7 @@ define void @zip_v4f64(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_512-NEXT:    stp d3, d2, [sp, #16]
 ; VBITS_EQ_512-NEXT:    stp d5, d4, [sp]
 ; VBITS_EQ_512-NEXT:    ld1d { z1.d }, p0/z, [x8]
-; VBITS_EQ_512-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; VBITS_EQ_512-NEXT:    fadd z0.d, z0.d, z1.d
 ; VBITS_EQ_512-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_EQ_512-NEXT:    mov sp, x29
 ; VBITS_EQ_512-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
@@ -356,7 +356,7 @@ define void @trn_v4f64(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
 ; VBITS_EQ_512-NEXT:    trn1 z2.d, z0.d, z1.d
 ; VBITS_EQ_512-NEXT:    trn2 z0.d, z0.d, z1.d
-; VBITS_EQ_512-NEXT:    fadd z0.d, p0/m, z0.d, z2.d
+; VBITS_EQ_512-NEXT:    fadd z0.d, z2.d, z0.d
 ; VBITS_EQ_512-NEXT:    st1d { z0.d }, p0, [x0]
 ; VBITS_EQ_512-NEXT:    ret
   %tmp1 = load <4 x double>, ptr %a
@@ -667,7 +667,7 @@ define void @zip_vscale2_4(ptr %a, ptr %b) #2 {
 ; CHECK-NEXT:    stp d3, d2, [sp, #16]
 ; CHECK-NEXT:    stp d5, d4, [sp]
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x8]
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
 ; CHECK-NEXT:    mov sp, x29
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-undef.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-undef.ll
index 7d5e22882cf701..04db29eb3a82b0 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-undef.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-fp-arith-undef.ll
@@ -45,7 +45,7 @@ define <vscale x 2 x double> @fabd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
 define <vscale x 8 x half> @fadd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: fadd_h:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.u.nxv8f16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x half> %a,
@@ -56,7 +56,7 @@ define <vscale x 8 x half> @fadd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a
 define <vscale x 4 x float> @fadd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fadd_s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.u.nxv4f32(<vscale x 4 x i1> %pg,
                                                                     <vscale x 4 x float> %a,
@@ -67,7 +67,7 @@ define <vscale x 4 x float> @fadd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
 define <vscale x 2 x double> @fadd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: fadd_d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.u.nxv2f64(<vscale x 2 x i1> %pg,
                                                                      <vscale x 2 x double> %a,
@@ -501,7 +501,7 @@ define <vscale x 2 x double> @fmsb_d(<vscale x 2 x i1> %pg, <vscale x 2 x double
 define <vscale x 8 x half> @fmul_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: fmul_h:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.u.nxv8f16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x half> %a,
@@ -512,7 +512,7 @@ define <vscale x 8 x half> @fmul_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a
 define <vscale x 4 x float> @fmul_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fmul_s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.u.nxv4f32(<vscale x 4 x i1> %pg,
                                                                     <vscale x 4 x float> %a,
@@ -523,7 +523,7 @@ define <vscale x 4 x float> @fmul_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
 define <vscale x 2 x double> @fmul_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: fmul_d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> %pg,
                                                                      <vscale x 2 x double> %a,
@@ -772,7 +772,7 @@ define <vscale x 2 x double> @fnmsb_d(<vscale x 2 x i1> %pg, <vscale x 2 x doubl
 define <vscale x 8 x half> @fsub_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: fsub_h:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x half> %a,
@@ -783,7 +783,7 @@ define <vscale x 8 x half> @fsub_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a
 define <vscale x 4 x float> @fsub_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fsub_s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1> %pg,
                                                                     <vscale x 4 x float> %a,
@@ -794,7 +794,7 @@ define <vscale x 4 x float> @fsub_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
 define <vscale x 2 x double> @fsub_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: fsub_d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> %pg,
                                                                      <vscale x 2 x double> %a,
@@ -846,7 +846,7 @@ define <vscale x 2 x double> @fsub_d_strictfp(<vscale x 2 x i1> %pg, <vscale x 2
 define <vscale x 8 x half> @fsubr_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
 ; CHECK-LABEL: fsubr_h:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fsub z0.h, z1.h, z0.h
 ; CHECK-NEXT:    ret
   %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.u.nxv8f16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x half> %b,
@@ -857,7 +857,7 @@ define <vscale x 8 x half> @fsubr_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %
 define <vscale x 4 x float> @fsubr_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
 ; CHECK-LABEL: fsubr_s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fsub z0.s, z1.s, z0.s
 ; CHECK-NEXT:    ret
   %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.u.nxv4f32(<vscale x 4 x i1> %pg,
                                                                     <vscale x 4 x float> %b,
@@ -868,7 +868,7 @@ define <vscale x 4 x float> @fsubr_s(<vscale x 4 x i1> %pg, <vscale x 4 x float>
 define <vscale x 2 x double> @fsubr_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: fsubr_d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fsub z0.d, z1.d, z0.d
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> %pg,
                                                                      <vscale x 2 x double> %b,
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
index c436dea8ff1b2e..cedf5fa3210236 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
@@ -11,10 +11,9 @@ target triple = "aarch64-unknown-linux-gnu"
 define <2 x half> @fadd_v2f16(<2 x half> %op1, <2 x half> %op2) {
 ; CHECK-LABEL: fadd_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fadd <2 x half> %op1, %op2
@@ -24,10 +23,9 @@ define <2 x half> @fadd_v2f16(<2 x half> %op1, <2 x half> %op2) {
 define <4 x half> @fadd_v4f16(<4 x half> %op1, <4 x half> %op2) {
 ; CHECK-LABEL: fadd_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fadd <4 x half> %op1, %op2
@@ -37,10 +35,9 @@ define <4 x half> @fadd_v4f16(<4 x half> %op1, <4 x half> %op2) {
 define <8 x half> @fadd_v8f16(<8 x half> %op1, <8 x half> %op2) {
 ; CHECK-LABEL: fadd_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fadd <8 x half> %op1, %op2
@@ -50,12 +47,10 @@ define <8 x half> @fadd_v8f16(<8 x half> %op1, <8 x half> %op2) {
 define void @fadd_v16f16(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fadd z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    fadd z0.h, z1.h, z0.h
+; CHECK-NEXT:    fadd z1.h, z2.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
@@ -68,10 +63,9 @@ define void @fadd_v16f16(ptr %a, ptr %b) {
 define <2 x float> @fadd_v2f32(<2 x float> %op1, <2 x float> %op2) {
 ; CHECK-LABEL: fadd_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fadd <2 x float> %op1, %op2
@@ -81,10 +75,9 @@ define <2 x float> @fadd_v2f32(<2 x float> %op1, <2 x float> %op2) {
 define <4 x float> @fadd_v4f32(<4 x float> %op1, <4 x float> %op2) {
 ; CHECK-LABEL: fadd_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fadd <4 x float> %op1, %op2
@@ -94,12 +87,10 @@ define <4 x float> @fadd_v4f32(<4 x float> %op1, <4 x float> %op2) {
 define void @fadd_v8f32(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fadd z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    fadd z0.s, z1.s, z0.s
+; CHECK-NEXT:    fadd z1.s, z2.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
@@ -112,10 +103,9 @@ define void @fadd_v8f32(ptr %a, ptr %b) {
 define <2 x double> @fadd_v2f64(<2 x double> %op1, <2 x double> %op2) {
 ; CHECK-LABEL: fadd_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fadd <2 x double> %op1, %op2
@@ -125,12 +115,10 @@ define <2 x double> @fadd_v2f64(<2 x double> %op1, <2 x double> %op2) {
 define void @fadd_v4f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fadd z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT:    fadd z0.d, z1.d, z0.d
+; CHECK-NEXT:    fadd z1.d, z2.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a
@@ -431,10 +419,9 @@ define void @fma_v4f64(ptr %a, ptr %b, ptr %c) {
 define <2 x half> @fmul_v2f16(<2 x half> %op1, <2 x half> %op2) {
 ; CHECK-LABEL: fmul_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fmul <2 x half> %op1, %op2
@@ -444,10 +431,9 @@ define <2 x half> @fmul_v2f16(<2 x half> %op1, <2 x half> %op2) {
 define <4 x half> @fmul_v4f16(<4 x half> %op1, <4 x half> %op2) {
 ; CHECK-LABEL: fmul_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fmul <4 x half> %op1, %op2
@@ -457,10 +443,9 @@ define <4 x half> @fmul_v4f16(<4 x half> %op1, <4 x half> %op2) {
 define <8 x half> @fmul_v8f16(<8 x half> %op1, <8 x half> %op2) {
 ; CHECK-LABEL: fmul_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fmul <8 x half> %op1, %op2
@@ -470,12 +455,10 @@ define <8 x half> @fmul_v8f16(<8 x half> %op1, <8 x half> %op2) {
 define void @fmul_v16f16(ptr %a, ptr %b) {
 ; CHECK-LABEL: fmul_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fmul z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    fmul z0.h, z1.h, z0.h
+; CHECK-NEXT:    fmul z1.h, z2.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
@@ -488,10 +471,9 @@ define void @fmul_v16f16(ptr %a, ptr %b) {
 define <2 x float> @fmul_v2f32(<2 x float> %op1, <2 x float> %op2) {
 ; CHECK-LABEL: fmul_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fmul <2 x float> %op1, %op2
@@ -501,10 +483,9 @@ define <2 x float> @fmul_v2f32(<2 x float> %op1, <2 x float> %op2) {
 define <4 x float> @fmul_v4f32(<4 x float> %op1, <4 x float> %op2) {
 ; CHECK-LABEL: fmul_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fmul <4 x float> %op1, %op2
@@ -514,12 +495,10 @@ define <4 x float> @fmul_v4f32(<4 x float> %op1, <4 x float> %op2) {
 define void @fmul_v8f32(ptr %a, ptr %b) {
 ; CHECK-LABEL: fmul_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fmul z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    fmul z0.s, z1.s, z0.s
+; CHECK-NEXT:    fmul z1.s, z2.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
@@ -532,10 +511,9 @@ define void @fmul_v8f32(ptr %a, ptr %b) {
 define <2 x double> @fmul_v2f64(<2 x double> %op1, <2 x double> %op2) {
 ; CHECK-LABEL: fmul_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fmul <2 x double> %op1, %op2
@@ -545,12 +523,10 @@ define <2 x double> @fmul_v2f64(<2 x double> %op1, <2 x double> %op2) {
 define void @fmul_v4f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: fmul_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fmul z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT:    fmul z0.d, z1.d, z0.d
+; CHECK-NEXT:    fmul z1.d, z2.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a
@@ -809,10 +785,9 @@ define void @fsqrt_v4f64(ptr %a) {
 define <2 x half> @fsub_v2f16(<2 x half> %op1, <2 x half> %op2) {
 ; CHECK-LABEL: fsub_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fsub <2 x half> %op1, %op2
@@ -822,10 +797,9 @@ define <2 x half> @fsub_v2f16(<2 x half> %op1, <2 x half> %op2) {
 define <4 x half> @fsub_v4f16(<4 x half> %op1, <4 x half> %op2) {
 ; CHECK-LABEL: fsub_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fsub <4 x half> %op1, %op2
@@ -835,10 +809,9 @@ define <4 x half> @fsub_v4f16(<4 x half> %op1, <4 x half> %op2) {
 define <8 x half> @fsub_v8f16(<8 x half> %op1, <8 x half> %op2) {
 ; CHECK-LABEL: fsub_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fsub <8 x half> %op1, %op2
@@ -848,12 +821,10 @@ define <8 x half> @fsub_v8f16(<8 x half> %op1, <8 x half> %op2) {
 define void @fsub_v16f16(ptr %a, ptr %b) {
 ; CHECK-LABEL: fsub_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fsub z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    fsub z0.h, z1.h, z0.h
+; CHECK-NEXT:    fsub z1.h, z2.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
@@ -866,10 +837,9 @@ define void @fsub_v16f16(ptr %a, ptr %b) {
 define <2 x float> @fsub_v2f32(<2 x float> %op1, <2 x float> %op2) {
 ; CHECK-LABEL: fsub_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fsub <2 x float> %op1, %op2
@@ -879,10 +849,9 @@ define <2 x float> @fsub_v2f32(<2 x float> %op1, <2 x float> %op2) {
 define <4 x float> @fsub_v4f32(<4 x float> %op1, <4 x float> %op2) {
 ; CHECK-LABEL: fsub_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fsub <4 x float> %op1, %op2
@@ -892,12 +861,10 @@ define <4 x float> @fsub_v4f32(<4 x float> %op1, <4 x float> %op2) {
 define void @fsub_v8f32(ptr %a, ptr %b) {
 ; CHECK-LABEL: fsub_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fsub z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    fsub z0.s, z1.s, z0.s
+; CHECK-NEXT:    fsub z1.s, z2.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
@@ -910,10 +877,9 @@ define void @fsub_v8f32(ptr %a, ptr %b) {
 define <2 x double> @fsub_v2f64(<2 x double> %op1, <2 x double> %op2) {
 ; CHECK-LABEL: fsub_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fsub <2 x double> %op1, %op2
@@ -923,12 +889,10 @@ define <2 x double> @fsub_v2f64(<2 x double> %op1, <2 x double> %op2) {
 define void @fsub_v4f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: fsub_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fsub z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT:    fsub z0.d, z1.d, z0.d
+; CHECK-NEXT:    fsub z1.d, z2.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
index 18f9a4d371d0cf..61f7ae27e9d5a4 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-convert.ll
@@ -8,11 +8,11 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @fp_convert_combine_crash(ptr %a, ptr %b) {
 ; CHECK-LABEL: fp_convert_combine_crash:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
-; CHECK-NEXT:    ldp q1, q2, [x0]
 ; CHECK-NEXT:    fmov z0.s, #8.00000000
-; CHECK-NEXT:    fmul z1.s, p0/m, z1.s, z0.s
-; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fmul z1.s, z1.s, z0.s
+; CHECK-NEXT:    fmul z0.s, z2.s, z0.s
 ; CHECK-NEXT:    fcvtzs z1.s, p0/m, z1.s
 ; CHECK-NEXT:    fcvtzs z0.s, p0/m, z0.s
 ; CHECK-NEXT:    stp q1, q0, [x1]
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
index d2d771c48c2044..1d5e726ca207b2 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
@@ -211,9 +211,9 @@ define half @faddv_v8f16(half %start, <8 x half> %a) {
 define half @faddv_v16f16(half %start, ptr %a) {
 ; CHECK-LABEL: faddv_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    ldp q2, q1, [x0]
-; CHECK-NEXT:    fadd z1.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    fadd z1.h, z2.h, z1.h
 ; CHECK-NEXT:    faddv h1, p0, z1.h
 ; CHECK-NEXT:    fadd h0, h0, h1
 ; CHECK-NEXT:    ret
@@ -249,9 +249,9 @@ define float @faddv_v4f32(float %start, <4 x float> %a) {
 define float @faddv_v8f32(float %start, ptr %a) {
 ; CHECK-LABEL: faddv_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldp q2, q1, [x0]
-; CHECK-NEXT:    fadd z1.s, p0/m, z1.s, z2.s
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fadd z1.s, z2.s, z1.s
 ; CHECK-NEXT:    faddv s1, p0, z1.s
 ; CHECK-NEXT:    fadd s0, s0, s1
 ; CHECK-NEXT:    ret
@@ -285,9 +285,9 @@ define double @faddv_v2f64(double %start, <2 x double> %a) {
 define double @faddv_v4f64(double %start, ptr %a) {
 ; CHECK-LABEL: faddv_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q2, q1, [x0]
-; CHECK-NEXT:    fadd z1.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fadd z1.d, z2.d, z1.d
 ; CHECK-NEXT:    faddv d1, p0, z1.d
 ; CHECK-NEXT:    fadd d0, d0, d1
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
index f0b0b3269e98ff..e02f11ed7b3a48 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
@@ -214,10 +214,9 @@ define void @abs_v4i64(ptr %a) {
 define void @fadd_v2f16(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v2f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ldr s1, [x1]
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
@@ -231,10 +230,9 @@ define void @fadd_v2f16(ptr %a, ptr %b) {
 define void @fadd_v4f16(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v4f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x half>, ptr %a
@@ -247,10 +245,9 @@ define void @fadd_v4f16(ptr %a, ptr %b) {
 define void @fadd_v8f16(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v8f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x half>, ptr %a
@@ -263,12 +260,10 @@ define void @fadd_v8f16(ptr %a, ptr %b) {
 define void @fadd_v16f16(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fadd z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    fadd z0.h, z1.h, z0.h
+; CHECK-NEXT:    fadd z1.h, z2.h, z3.h
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
@@ -281,10 +276,9 @@ define void @fadd_v16f16(ptr %a, ptr %b) {
 define void @fadd_v2f32(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl2
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    str d0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <2 x float>, ptr %a
@@ -297,10 +291,9 @@ define void @fadd_v2f32(ptr %a, ptr %b) {
 define void @fadd_v4f32(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x float>, ptr %a
@@ -313,12 +306,10 @@ define void @fadd_v4f32(ptr %a, ptr %b) {
 define void @fadd_v8f32(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fadd z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    fadd z0.s, z1.s, z0.s
+; CHECK-NEXT:    fadd z1.s, z2.s, z3.s
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
@@ -331,10 +322,9 @@ define void @fadd_v8f32(ptr %a, ptr %b) {
 define void @fadd_v2f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <2 x double>, ptr %a
@@ -347,12 +337,10 @@ define void @fadd_v2f64(ptr %a, ptr %b) {
 define void @fadd_v4f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: fadd_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q0, q3, [x1]
 ; CHECK-NEXT:    ldp q1, q2, [x0]
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    movprfx z1, z2
-; CHECK-NEXT:    fadd z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT:    fadd z0.d, z1.d, z0.d
+; CHECK-NEXT:    fadd z1.d, z2.d, z3.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
index d7bfb6b2680e13..647e09687be360 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
@@ -287,15 +287,13 @@ define void @zip_v4f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: zip_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q3, q2, [x1]
 ; CHECK-NEXT:    zip1 z4.d, z1.d, z3.d
 ; CHECK-NEXT:    zip1 z5.d, z0.d, z2.d
 ; CHECK-NEXT:    trn2 z1.d, z1.d, z3.d
 ; CHECK-NEXT:    trn2 z0.d, z0.d, z2.d
-; CHECK-NEXT:    movprfx z2, z4
-; CHECK-NEXT:    fadd z2.d, p0/m, z2.d, z5.d
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z2.d, z4.d, z5.d
+; CHECK-NEXT:    fadd z0.d, z1.d, z0.d
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <4 x double>, ptr %a
@@ -449,14 +447,13 @@ define void @trn_v4f64(ptr %a, ptr %b) {
 ; CHECK-LABEL: trn_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q2, [x0]
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q1, q3, [x1]
 ; CHECK-NEXT:    zip1 z4.d, z0.d, z1.d
 ; CHECK-NEXT:    trn2 z0.d, z0.d, z1.d
 ; CHECK-NEXT:    zip1 z1.d, z2.d, z3.d
 ; CHECK-NEXT:    trn2 z2.d, z2.d, z3.d
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z4.d
-; CHECK-NEXT:    fadd z1.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    fadd z0.d, z4.d, z0.d
+; CHECK-NEXT:    fadd z1.d, z1.d, z2.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <4 x double>, ptr %a
@@ -471,12 +468,11 @@ define void @trn_v4f64(ptr %a, ptr %b) {
 define void @trn_v4f32(ptr %a, ptr %b) {
 ; CHECK-LABEL: trn_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x1]
 ; CHECK-NEXT:    trn1 z2.s, z0.s, z1.s
 ; CHECK-NEXT:    trn2 z0.s, z0.s, z1.s
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    fadd z0.s, z2.s, z0.s
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <4 x float>, ptr %a
@@ -1025,7 +1021,6 @@ define void @uzp_v8f32(ptr %a, ptr %b) #0{
 ; CHECK-NEXT:    ldp q2, q0, [x0]
 ; CHECK-NEXT:    adrp x8, .LCPI21_0
 ; CHECK-NEXT:    ldp q4, q1, [x1]
-; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    mov z3.s, z0.s[2]
 ; CHECK-NEXT:    mov z5.s, z1.s[2]
 ; CHECK-NEXT:    stp s0, s3, [sp, #24]
@@ -1042,8 +1037,8 @@ define void @uzp_v8f32(ptr %a, ptr %b) #0{
 ; CHECK-NEXT:    tbl z0.s, { z4.s }, z5.s
 ; CHECK-NEXT:    str s1, [sp, #32]
 ; CHECK-NEXT:    ldr q1, [sp, #32]
-; CHECK-NEXT:    fadd z1.s, p0/m, z1.s, z2.s
-; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z3.s
+; CHECK-NEXT:    fadd z1.s, z2.s, z1.s
+; CHECK-NEXT:    fadd z0.s, z3.s, z0.s
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
@@ -1186,15 +1181,13 @@ define void @zip_vscale2_4(ptr %a, ptr %b) {
 ; CHECK-LABEL: zip_vscale2_4:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q1, q0, [x0]
-; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q3, q2, [x1]
 ; CHECK-NEXT:    zip1 z4.d, z1.d, z3.d
 ; CHECK-NEXT:    zip1 z5.d, z0.d, z2.d
 ; CHECK-NEXT:    trn2 z1.d, z1.d, z3.d
 ; CHECK-NEXT:    trn2 z0.d, z0.d, z2.d
-; CHECK-NEXT:    movprfx z2, z4
-; CHECK-NEXT:    fadd z2.d, p0/m, z2.d, z5.d
-; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    fadd z2.d, z4.d, z5.d
+; CHECK-NEXT:    fadd z0.d, z1.d, z0.d
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
   %tmp1 = load <4 x double>, ptr %a
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
index b2bc8feb3420f2..62b0c9ea331151 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfadd.ll
@@ -27,7 +27,7 @@ define <vscale x 8 x bfloat> @bfadd_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x
 define <vscale x 8 x bfloat> @bfadd_u(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
 ; CHECK-LABEL: bfadd_u:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bfadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    bfadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
   ret <vscale x 8 x bfloat> %res
@@ -57,7 +57,7 @@ define <vscale x 8 x bfloat> @bfadd_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z2.h, #0 // =0x0
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
-; CHECK-NEXT:    bfadd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    bfadd z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
   %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fadd.u.nxv8bf16(<vscale x 8 x i1> %pg,
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
index c4c78b3280bb60..56c74c10f7c109 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfmul.ll
@@ -27,7 +27,7 @@ define <vscale x 8 x bfloat> @bfmul_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x
 define <vscale x 8 x bfloat> @bfmul_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
 ; CHECK-LABEL: bfmul_u_pred:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bfmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    bfmul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
   ret <vscale x 8 x bfloat> %res
@@ -57,7 +57,7 @@ define <vscale x 8 x bfloat> @bfmul_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z2.h, #0 // =0x0
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
-; CHECK-NEXT:    bfmul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    bfmul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
   %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1> %pg,
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
index f3a4b039492d09..135185b9bda329 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-bfsub.ll
@@ -27,7 +27,7 @@ define <vscale x 8 x bfloat> @bfsub_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x
 define <vscale x 8 x bfloat> @bfsub_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b){
 ; CHECK-LABEL: bfsub_u_pred:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bfsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    bfsub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
   ret <vscale x 8 x bfloat> %res
@@ -57,7 +57,7 @@ define <vscale x 8 x bfloat> @bfsub_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z2.h, #0 // =0x0
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
-; CHECK-NEXT:    bfsub z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    bfsub z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
   %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fsub.u.nxv8bf16(<vscale x 8 x i1> %pg,
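
For anyone skimming the mechanical test updates above, the recurring shape
of the change is: the ptrue that fed a predicated FADD/FMUL/FSUB (and any
movprfx inserted to satisfy the destructive-operand constraint) collapses
into a single unpredicated instruction. A minimal sketch distilled from the
fadd_v4f32 test above (the function name @sketch_fadd is illustrative and
not part of the patch; it assumes a non-strictfp function compiled for a
configuration that lowers fixed-length vectors via SVE):

define <4 x float> @sketch_fadd(<4 x float> %op1, <4 x float> %op2) {
  ; old codegen: ptrue p0.s, vl4
  ;              fadd  z0.s, p0/m, z0.s, z1.s
  ; new codegen: fadd  z0.s, z0.s, z1.s
  %res = fadd <4 x float> %op1, %op2
  ret <4 x float> %res
}

Because the unpredicated form is not destructive, the result register no
longer has to alias the first source operand, which is why the movprfx
copies in the two-register hunks disappear as well. The same constraint
explains the fsubr -> fsub rewrites: the reversed-subtract form only
existed to avoid a movprfx, and with an unpredicated fsub the operands can
simply be issued in their natural order.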


