[llvm] d32b8fd - [LLVM][AArch64] Replace aarch64.sve.ld by aarch64.sve.ldN.sret

Caroline Concatto via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 20 05:16:15 PDT 2022


Author: Caroline Concatto
Date: 2022-09-20T13:15:07+01:00
New Revision: d32b8fdbdb4b99a5cc21604db6211fc506eb1f9b

URL: https://github.com/llvm/llvm-project/commit/d32b8fdbdb4b99a5cc21604db6211fc506eb1f9b
DIFF: https://github.com/llvm/llvm-project/commit/d32b8fdbdb4b99a5cc21604db6211fc506eb1f9b.diff

LOG: [LLVM][AArch64] Replace aarch64.sve.ld by aarch64.sve.ldN.sret

This patch removes the aarch64.sve.ldN intrinsics from TableGen in favour of
using aarch64.sve.ldN.sret.

Depends on: D133023

Differential Revision: https://reviews.llvm.org/D133025
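
For illustration, the IR-level effect of the auto-upgrade (a minimal sketch mirroring the
upgrade test added below; %pg and %ptr are placeholder names):

    ; Before: one intrinsic returning a single wide tuple vector
    %res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %pg, i8* %ptr)

    ; After: the upgrader calls the sret form and rebuilds the wide vector
    %s   = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %pg, i8* %ptr)
    %v0  = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %s, 0
    %t   = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %v0, i64 0)
    %v1  = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %s, 1
    %res = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %t, <vscale x 16 x i8> %v1, i64 16)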

Added: 
    llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/IR/AutoUpgrade.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
    llvm/test/CodeGen/AArch64/sve-merging-stores.ll

Removed: 
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll


################################################################################
diff  --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 8d17ffe5cec26..da67afd3ae955 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -971,10 +971,6 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty],
                 [IntrReadMem, IntrArgMemOnly, ImmArg<ArgIndex<1>>]>;
 
-  class AdvSIMD_ManyVec_PredLoad_Intrinsic
-    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMPointerToElt<0>],
-                [IntrReadMem, IntrArgMemOnly]>;
-
   class AdvSIMD_1Vec_PredLoad_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1569,10 +1565,6 @@ def int_aarch64_sve_tuple_set : AdvSIMD_SVE_Set_Vector_Tuple;
 
 def int_aarch64_sve_ld1   : AdvSIMD_1Vec_PredLoad_Intrinsic;
 
-def int_aarch64_sve_ld2 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
-def int_aarch64_sve_ld3 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
-def int_aarch64_sve_ld4 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
-
 def int_aarch64_sve_ld2_sret : AdvSIMD_2Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ld3_sret : AdvSIMD_3Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ld4_sret : AdvSIMD_4Vec_PredLoad_Intrinsic;

diff  --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 18ea2fd4c3366..cbe27f603600a 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -558,6 +558,22 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
                                         F->arg_begin()->getType());
       return true;
     }
+    static const Regex LdRegex("^aarch64\\.sve\\.ld[234](.nxv[a-z0-9]+|$)");
+    if (LdRegex.match(Name)) {
+      Type *ScalarTy =
+          dyn_cast<VectorType>(F->getReturnType())->getElementType();
+      ElementCount EC =
+          dyn_cast<VectorType>(F->arg_begin()->getType())->getElementCount();
+      Type *Ty = VectorType::get(ScalarTy, EC);
+      Intrinsic::ID ID =
+          StringSwitch<Intrinsic::ID>(Name)
+              .StartsWith("aarch64.sve.ld2", Intrinsic::aarch64_sve_ld2_sret)
+              .StartsWith("aarch64.sve.ld3", Intrinsic::aarch64_sve_ld3_sret)
+              .StartsWith("aarch64.sve.ld4", Intrinsic::aarch64_sve_ld4_sret)
+              .Default(Intrinsic::not_intrinsic);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Ty);
+      return true;
+    }
     if (Name.startswith("arm.neon.vclz")) {
       Type* args[2] = {
         F->arg_begin()->getType(),
@@ -3858,7 +3874,30 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
     NewCall = Builder.CreateCall(NewFn, Args);
     break;
   }
-
+  case Intrinsic::aarch64_sve_ld3_sret:
+  case Intrinsic::aarch64_sve_ld4_sret:
+  case Intrinsic::aarch64_sve_ld2_sret: {
+    StringRef Name = F->getName();
+    Name = Name.substr(5);
+    unsigned N = StringSwitch<unsigned>(Name)
+                     .StartsWith("aarch64.sve.ld2", 2)
+                     .StartsWith("aarch64.sve.ld3", 3)
+                     .StartsWith("aarch64.sve.ld4", 4)
+                     .Default(0);
+    ScalableVectorType *RetTy =
+        dyn_cast<ScalableVectorType>(F->getReturnType());
+    unsigned MinElts = RetTy->getMinNumElements() / N;
+    SmallVector<Value *, 2> Args(CI->args());
+    Value *NewLdCall = Builder.CreateCall(NewFn, Args);
+    Value *Ret = llvm::PoisonValue::get(RetTy);
+    for (unsigned I = 0; I < N; I++) {
+      Value *Idx = ConstantInt::get(Type::getInt64Ty(C), I * MinElts);
+      Value *SRet = Builder.CreateExtractValue(NewLdCall, I);
+      Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
+    }
+    NewCall = dyn_cast<CallInst>(Ret);
+    break;
+  }
   case Intrinsic::arm_neon_bfdot:
   case Intrinsic::arm_neon_bfmmla:
   case Intrinsic::arm_neon_bfmlalb:

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b482c29d9156d..c6639464f11e9 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -13840,61 +13840,6 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
   return true;
 }
 
-// Lower an SVE structured load intrinsic returning a tuple type to target
-// specific intrinsic taking the same input but returning a multi-result value
-// of the split tuple type.
-//
-// E.g. Lowering an LD3:
-//
-//  call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32(
-//                                                    <vscale x 4 x i1> %pred,
-//                                                    <vscale x 4 x i32>* %addr)
-//
-//  Output DAG:
-//
-//    t0: ch = EntryToken
-//        t2: nxv4i1,ch = CopyFromReg t0, Register:nxv4i1 %0
-//        t4: i64,ch = CopyFromReg t0, Register:i64 %1
-//    t5: nxv4i32,nxv4i32,nxv4i32,ch = AArch64ISD::SVE_LD3 t0, t2, t4
-//    t6: nxv12i32 = concat_vectors t5, t5:1, t5:2
-//
-// This is called pre-legalization to avoid widening/splitting issues with
-// non-power-of-2 tuple types used for LD3, such as nxv12i32.
-SDValue AArch64TargetLowering::LowerSVEStructLoad(unsigned Intrinsic,
-                                                  ArrayRef<SDValue> LoadOps,
-                                                  EVT VT, SelectionDAG &DAG,
-                                                  const SDLoc &DL) const {
-  assert(VT.isScalableVector() && "Can only lower scalable vectors");
-
-  unsigned N, Opcode;
-  static const std::pair<unsigned, std::pair<unsigned, unsigned>>
-      IntrinsicMap[] = {
-          {Intrinsic::aarch64_sve_ld2, {2, AArch64ISD::SVE_LD2_MERGE_ZERO}},
-          {Intrinsic::aarch64_sve_ld3, {3, AArch64ISD::SVE_LD3_MERGE_ZERO}},
-          {Intrinsic::aarch64_sve_ld4, {4, AArch64ISD::SVE_LD4_MERGE_ZERO}}};
-
-  std::tie(N, Opcode) = llvm::find_if(IntrinsicMap, [&](auto P) {
-                          return P.first == Intrinsic;
-                        })->second;
-  assert(VT.getVectorElementCount().getKnownMinValue() % N == 0 &&
-         "invalid tuple vector type!");
-
-  EVT SplitVT =
-      EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
-                       VT.getVectorElementCount().divideCoefficientBy(N));
-  assert(isTypeLegal(SplitVT));
-
-  SmallVector<EVT, 5> VTs(N, SplitVT);
-  VTs.push_back(MVT::Other); // Chain
-  SDVTList NodeTys = DAG.getVTList(VTs);
-
-  SDValue PseudoLoad = DAG.getNode(Opcode, DL, NodeTys, LoadOps);
-  SmallVector<SDValue, 4> PseudoLoadOps;
-  for (unsigned I = 0; I < N; ++I)
-    PseudoLoadOps.push_back(SDValue(PseudoLoad.getNode(), I));
-  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, PseudoLoadOps);
-}
-
 EVT AArch64TargetLowering::getOptimalMemOpType(
     const MemOp &Op, const AttributeList &FuncAttributes) const {
   bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
@@ -20400,20 +20345,6 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
       SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, DestVT, Opnds);
       return DAG.getMergeValues({Concat, Chain}, DL);
     }
-    case Intrinsic::aarch64_sve_ld2:
-    case Intrinsic::aarch64_sve_ld3:
-    case Intrinsic::aarch64_sve_ld4: {
-      SDLoc DL(N);
-      SDValue Chain = N->getOperand(0);
-      SDValue Mask = N->getOperand(2);
-      SDValue BasePtr = N->getOperand(3);
-      SDValue LoadOps[] = {Chain, Mask, BasePtr};
-      unsigned IntrinsicID =
-          cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
-      SDValue Result =
-          LowerSVEStructLoad(IntrinsicID, LoadOps, N->getValueType(0), DAG, DL);
-      return DAG.getMergeValues({Result, Chain}, DL);
-    }
     case Intrinsic::aarch64_rndr:
     case Intrinsic::aarch64_rndrrs: {
       unsigned IntrinsicID =

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 4875e786a1102..0a98d0b525a0f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1041,8 +1041,6 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                          SDValue &Size,
                                          SelectionDAG &DAG) const;
-  SDValue LowerSVEStructLoad(unsigned Intrinsic, ArrayRef<SDValue> LoadOps,
-                             EVT VT, SelectionDAG &DAG, const SDLoc &DL) const;
 
   SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                                SelectionDAG &DAG) const;

diff  --git a/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll b/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll
new file mode 100644
index 0000000000000..f9715cbd7f1ef
--- /dev/null
+++ b/llvm/test/Bitcode/upgrade-aarch64-sve-intrinsics.ll
@@ -0,0 +1,74 @@
+; RUN: llvm-as %s -o - | llvm-dis - | FileCheck %s
+
+define <vscale x 32 x i8> @ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+; CHECK-NEXT:  %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
+; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+; CHECK-NEXT:  %res = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %3, <vscale x 16 x i8> %4, i64 16)
+; CHECK-NEXT:  ret <vscale x 32 x i8> %res
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 48 x i8> @ld3.nxv48i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+; CHECK-NEXT:  %3 = call <vscale x 48 x i8> @llvm.vector.insert.nxv48i8.nxv16i8(<vscale x 48 x i8> poison, <vscale x 16 x i8> %2, i64 0)
+; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+; CHECK-NEXT:  %5 = call <vscale x 48 x i8> @llvm.vector.insert.nxv48i8.nxv16i8(<vscale x 48 x i8> %3, <vscale x 16 x i8> %4, i64 16)
+; CHECK-NEXT:  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
+; CHECK-NEXT:  %res = call <vscale x 48 x i8> @llvm.vector.insert.nxv48i8.nxv16i8(<vscale x 48 x i8> %5, <vscale x 16 x i8> %6, i64 32)
+; CHECK-NEXT:  ret <vscale x 48 x i8> %res
+%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 48 x i8> %res
+}
+
+define <vscale x 64 x i8> @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+; CHECK-NEXT:  %3 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> poison, <vscale x 16 x i8> %2, i64 0)
+; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+; CHECK-NEXT:  %5 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %3, <vscale x 16 x i8> %4, i64 16)
+; CHECK-NEXT:  %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
+; CHECK-NEXT:  %7 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %5, <vscale x 16 x i8> %6, i64 32)
+; CHECK-NEXT:  %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
+; CHECK-NEXT:  %res = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %7, <vscale x 16 x i8> %8, i64 48)
+; CHECK-NEXT:  ret <vscale x 64 x i8> %res
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 64 x i8> %res
+}
+
+; Check short mangling name
+
+; ldN intrinsic name without any element type
+define <vscale x 32 x i8> @ld2.nxv32i8_no_eltty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
+; CHECK-LABEL:  @ld2.nxv32i8_no_eltty
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+; CHECK-NEXT:  %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
+; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+; CHECK-NEXT:  %res = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %3, <vscale x 16 x i8> %4, i64 16)
+; CHECK-NEXT:  ret <vscale x 32 x i8> %res
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 32 x i8> %res
+}
+
+; ldN intrinsic name with only output type
+define <vscale x 32 x i8> @ld2.nxv32i8_no_predty_pty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
+; CHECK-LABEL:  @ld2.nxv32i8_no_predty_pty
+; CHECK:  %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, i8* %base_ptr)
+; CHECK-NEXT:  %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
+; CHECK-NEXT:  %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
+; CHECK-NEXT:  %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
+; CHECK-NEXT:  %res = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %3, <vscale x 16 x i8> %4, i64 16)
+; CHECK-NEXT:  ret <vscale x 32 x i8> %res
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2(<vscale x 16 x i1>, i8 *)
+declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8(<vscale x 16 x i1>, i8 *)

diff  --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index b9815647a8a61..200737dde948c 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -29,10 +29,26 @@ define float @foo1(double* %x0, double* %x1, double* %x2) nounwind {
 entry:
   %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
-  %2 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x0)
-  %3 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x1)
+  %2 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x0)
+  %3 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x1)
   %4 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %1, double* %x2)
-  %call = call float @callee1(float 1.000000e+00, <vscale x 8 x double> %2, <vscale x 8 x double> %3, <vscale x 2 x double> %4)
+  %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  0
+  %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  1
+  %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  2
+  %8 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  3
+  %9 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> poison, <vscale x 2 x double> %5, i64 0)
+  %10 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %9, <vscale x 2 x double> %6, i64 2)
+  %11 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %10, <vscale x 2 x double> %7, i64 4)
+  %12 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %11, <vscale x 2 x double> %8, i64 6)
+  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  0
+  %14 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  1
+  %15 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  2
+  %16 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  3
+  %17 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> undef, <vscale x 2 x double> %13, i64 0)
+  %18 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %17, <vscale x 2 x double> %14, i64 2)
+  %19 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %18, <vscale x 2 x double> %15, i64 4)
+  %20 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %19, <vscale x 2 x double> %16, i64 6)
+  %call = call float @callee1(float 1.000000e+00, <vscale x 8 x double> %12, <vscale x 8 x double> %20, <vscale x 2 x double> %4)
   ret float %call
 }
 
@@ -73,9 +89,25 @@ define float @foo2(double* %x0, double* %x1) nounwind {
 entry:
   %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
-  %2 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x0)
-  %3 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x1)
-  %call = call float @callee2(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, float 1.000000e+00, <vscale x 8 x double> %2, <vscale x 8 x double> %3)
+  %2 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x0)
+  %3 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x1)
+  %4 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  0
+  %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  1
+  %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  2
+  %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  3
+  %8 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> poison, <vscale x 2 x double> %4, i64 0)
+  %9 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %8, <vscale x 2 x double> %5, i64 2)
+  %10 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %9, <vscale x 2 x double> %6, i64 4)
+  %11 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %10, <vscale x 2 x double> %7, i64 6)
+  %12 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  0
+  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  1
+  %14 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  2
+  %15 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  3
+  %16 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> poison, <vscale x 2 x double> %12, i64 0)
+  %17 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %16, <vscale x 2 x double> %13, i64 2)
+  %18 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %17, <vscale x 2 x double> %14, i64 4)
+  %19 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %18, <vscale x 2 x double> %15, i64 6)
+  %call = call float @callee2(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, float 1.000000e+00, <vscale x 8 x double> %11, <vscale x 8 x double> %19)
   ret float %call
 }
 
@@ -102,10 +134,24 @@ define float @foo3(double* %x0, double* %x1, double* %x2) nounwind {
 entry:
   %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
   %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %0)
-  %2 = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1> %1, double* %x0)
-  %3 = call <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1(<vscale x 2 x i1> %1, double* %x1)
+  %2 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x0)
+  %3 = call {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1> %1, double* %x1)
   %4 = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %1, double* %x2)
-  %call = call float @callee3(float 1.000000e+00, float 2.000000e+00, <vscale x 8 x double> %2, <vscale x 6 x double> %3, <vscale x 2 x double> %4)
+  %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  0
+  %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  1
+  %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  2
+  %8 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %2,  3
+  %9 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> poison, <vscale x 2 x double> %5, i64 0)
+  %10 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %9, <vscale x 2 x double> %6, i64 2)
+  %11 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %10, <vscale x 2 x double> %7, i64 4)
+  %12 = call <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double> %11, <vscale x 2 x double> %8, i64 6)
+  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} %3,  0
+  %14 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  1
+  %15 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %3,  2
+  %16 = call <vscale x 6 x double> @llvm.vector.insert.nxv6f64.nx2f64(<vscale x 6 x double> poison, <vscale x 2 x double> %13, i64 0)
+  %17 = call <vscale x 6 x double> @llvm.vector.insert.nxv6f64.nx2f64(<vscale x 6 x double> %16 , <vscale x 2 x double> %14, i64 2)
+  %18 = call <vscale x 6 x double> @llvm.vector.insert.nxv6f64.nx2f64(<vscale x 6 x double> %17 , <vscale x 2 x double> %15, i64 4)
+  %call = call float @callee3(float 1.000000e+00, float 2.000000e+00, <vscale x 8 x double> %12, <vscale x 6 x double> %18, <vscale x 2 x double> %4)
   ret float %call
 }
 
@@ -435,9 +481,9 @@ declare float @callee3(float, float, <vscale x 8 x double>, <vscale x 6 x double
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 immarg)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1>)
-declare <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1(<vscale x 2 x i1>, double*)
-declare <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1(<vscale x 2 x i1>, double*)
+declare {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld4.sret.nxv2f64(<vscale x 2 x i1>, double*)
+declare {<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>} @llvm.aarch64.sve.ld3.sret.nxv2f64(<vscale x 2 x i1>, double*)
 declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)
 declare double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>)
-declare <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv8f64(<vscale x 8 x double>, i32 immarg)
-declare <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv6f64(<vscale x 6 x double>, i32 immarg)
+declare <vscale x 8 x double> @llvm.vector.insert.nxv8f64.nx2f64(<vscale x 8 x double>, <vscale x 2 x double>, i64)
+declare <vscale x 6 x double> @llvm.vector.insert.nxv6f64.nx2f64(<vscale x 6 x double>, <vscale x 2 x double>, i64)

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll
deleted file mode 100644
index 18e552e5f4a3a..0000000000000
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll
+++ /dev/null
@@ -1,539 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=sve < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=sme < %s | FileCheck %s
-
-; NOTE: invalid, upper and lower bound immediate values of the regimm
-; addressing mode are checked only for the byte version of each
-; instruction (`ld<N>b`), as the code for detecting the immediate is
-; common to all instructions, and varies only for the number of
-; elements of the structure store, which is <N> = 2, 3, 4.
-
-; ld2b
-define <vscale x 32 x i8> @ld2.nxv32i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld2.nxv32i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, #2, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 2
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
-%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 32 x i8> %res
-}
-
-define <vscale x 32 x i8> @ld2.nxv32i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld2.nxv32i8_lower_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, #-16, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 32 x i8> %res
-}
-
-define <vscale x 32 x i8> @ld2.nxv32i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld2.nxv32i8_upper_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, #14, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 32 x i8> %res
-}
-
-define <vscale x 32 x i8> @ld2.nxv32i8_not_multiple_of_2(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld2.nxv32i8_not_multiple_of_2:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #3
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 32 x i8> %res
-}
-
-define <vscale x 32 x i8> @ld2.nxv32i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld2.nxv32i8_outside_lower_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-18
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -18
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 32 x i8> %res
-}
-
-define <vscale x 32 x i8> @ld2.nxv32i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld2.nxv32i8_outside_upper_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #16
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 16
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 32 x i8> %res
-}
-
-; ld2h
-define <vscale x 16 x i16> @ld2.nxv16i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16>* %addr) {
-; CHECK-LABEL: ld2.nxv16i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, #14, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 14
-%base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
-%res = call <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
-ret <vscale x 16 x i16> %res
-}
-
-define <vscale x 16 x half> @ld2.nxv16f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half>* %addr) {
-; CHECK-LABEL: ld2.nxv16f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, #-16, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 -16
-%base_ptr = bitcast <vscale x 8 x half>* %base to half *
-%res = call <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
-ret <vscale x 16 x half> %res
-}
-
-define <vscale x 16 x bfloat> @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat>* %addr) #0 {
-; CHECK-LABEL: ld2.nxv16bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, #12, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 12
-%base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
-%res = call <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
-ret <vscale x 16 x bfloat> %res
-}
-
-; ld2w
-define <vscale x 8 x i32> @ld2.nxv8i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32>* %addr) {
-; CHECK-LABEL: ld2.nxv8i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, #14, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 14
-%base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
-%res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
-ret <vscale x 8 x i32> %res
-}
-
-define <vscale x 8 x float> @ld2.nxv8f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
-; CHECK-LABEL: ld2.nxv8f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, #-16, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -16
-%base_ptr = bitcast <vscale x 4 x float>* %base to float *
-%res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
-ret <vscale x 8 x float> %res
-}
-
-; ld2d
-define <vscale x 4 x i64> @ld2.nxv4i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64>* %addr) {
-; CHECK-LABEL: ld2.nxv4i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, #14, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 14
-%base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
-%res = call <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
-ret <vscale x 4 x i64> %res
-}
-
-define <vscale x 4 x double> @ld2.nxv4f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double>* %addr) {
-; CHECK-LABEL: ld2.nxv4f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, #-16, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -16
-%base_ptr = bitcast <vscale x 2 x double>* %base to double *
-%res = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
-ret <vscale x 4 x double> %res
-}
-
-; ld3b
-define <vscale x 48 x i8> @ld3.nxv48i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld3.nxv48i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, #3, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 48 x i8> %res
-}
-
-define <vscale x 48 x i8> @ld3.nxv48i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld3.nxv48i8_lower_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, #-24, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 48 x i8> %res
-}
-
-define <vscale x 48 x i8> @ld3.nxv48i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld3.nxv48i8_upper_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, #21, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 48 x i8> %res
-}
-
-define <vscale x 48 x i8> @ld3.nxv48i8_not_multiple_of_3_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_01:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #4
-; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 48 x i8> %res
-}
-
-define <vscale x 48 x i8> @ld3.nxv48i8_not_multiple_of_3_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_02:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #5
-; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 48 x i8> %res
-}
-
-define <vscale x 48 x i8> @ld3.nxv48i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld3.nxv48i8_outside_lower_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-27
-; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -27
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 48 x i8> %res
-}
-
-define <vscale x 48 x i8> @ld3.nxv48i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld3.nxv48i8_outside_upper_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #24
-; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 24
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 48 x i8> %res
-}
-
-; ld3h
-define <vscale x 24 x i16> @ld3.nxv24i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
-; CHECK-LABEL: ld3.nxv24i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, #21, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 21
-%base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
-%res = call <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
-ret <vscale x 24 x i16> %res
-}
-
-define <vscale x 24 x half> @ld3.nxv24f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
-; CHECK-LABEL: ld3.nxv24f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, #21, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 21
-%base_ptr = bitcast <vscale x 8 x half>* %base to half *
-%res = call <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
-ret <vscale x 24 x half> %res
-}
-
-define <vscale x 24 x bfloat> @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
-; CHECK-LABEL: ld3.nxv24bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, #-24, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -24
-%base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
-%res = call <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
-ret <vscale x 24 x bfloat> %res
-}
-
-; ld3w
-define <vscale x 12 x i32> @ld3.nxv12i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
-; CHECK-LABEL: ld3.nxv12i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, #21, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 21
-%base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
-%res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
-ret <vscale x 12 x i32> %res
-}
-
-define <vscale x 12 x float> @ld3.nxv12f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float> *%addr) {
-; CHECK-LABEL: ld3.nxv12f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, #-24, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -24
-%base_ptr = bitcast <vscale x 4 x float>* %base to float *
-%res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
-ret <vscale x 12 x float> %res
-}
-
-; ld3d
-define <vscale x 6 x i64> @ld3.nxv6i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
-; CHECK-LABEL: ld3.nxv6i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, #21, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 21
-%base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
-%res = call <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
-ret <vscale x 6 x i64> %res
-}
-
-define <vscale x 6 x double> @ld3.nxv6f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
-; CHECK-LABEL: ld3.nxv6f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, #-24, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -24
-%base_ptr = bitcast <vscale x 2 x double>* %base to double *
-%res = call <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
-ret <vscale x 6 x double> %res
-}
-
-; ; ld4b
-define <vscale x 64 x i8> @ld4.nxv64i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld4.nxv64i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, #4, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 64 x i8> %res
-}
-
-define <vscale x 64 x i8> @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld4.nxv64i8_lower_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, #-32, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 64 x i8> %res
-}
-
-define <vscale x 64 x i8> @ld4.nxv64i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld4.nxv64i8_upper_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, #28, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 64 x i8> %res
-}
-
-define <vscale x 64 x i8> @ld4.nxv64i8_not_multiple_of_4_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_01:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #5
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 64 x i8> %res
-}
-
-define <vscale x 64 x i8> @ld4.nxv64i8_not_multiple_of_4_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_02:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #6
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 6
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 64 x i8> %res
-}
-
-define <vscale x 64 x i8> @ld4.nxv64i8_not_multiple_of_4_03(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_03:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #7
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 7
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 64 x i8> %res
-}
-
-define <vscale x 64 x i8> @ld4.nxv64i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld4.nxv64i8_outside_lower_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    mov x9, #-576
-; CHECK-NEXT:    lsr x8, x8, #4
-; CHECK-NEXT:    mul x8, x8, x9
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #4) #9)
-; xM = -9 * 2^6
-; xP = RDVL * 2^-4
-; xOFFSET = RDVL * 2^-4 * -9 * 2^6 = RDVL * -36
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -36
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 64 x i8> %res
-}
-
-define <vscale x 64 x i8> @ld4.nxv64i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
-; CHECK-LABEL: ld4.nxv64i8_outside_upper_bound:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #1
-; CHECK-NEXT:    mov w9, #512
-; CHECK-NEXT:    lsr x8, x8, #4
-; CHECK-NEXT:    mul x8, x8, x9
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
-; CHECK-NEXT:    ret
-; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #16) #2)
-; xM = 2^9
-; xP = RDVL * 2^-4
-; xOFFSET = RDVL * 2^-4 * 2^9 = RDVL * 32
-%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 32
-%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
-%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
-ret <vscale x 64 x i8> %res
-}
-
-; ld4h
-define <vscale x 32 x i16> @ld4.nxv32i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
-; CHECK-LABEL: ld4.nxv32i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, #8, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 8
-%base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
-%res = call <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
-ret <vscale x 32 x i16> %res
-}
-
-define <vscale x 32 x half> @ld4.nxv32f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
-; CHECK-LABEL: ld4.nxv32f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, #28, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 28
-%base_ptr = bitcast <vscale x 8 x half>* %base to half *
-%res = call <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
-ret <vscale x 32 x half> %res
-}
-
-define <vscale x 32 x bfloat> @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
-; CHECK-LABEL: ld4.nxv32bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, #-32, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -32
-%base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
-%res = call <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
-ret <vscale x 32 x bfloat> %res
-}
-
-; ld4w
-define <vscale x 16 x i32> @ld4.nxv16i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
-; CHECK-LABEL: ld4.nxv16i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, #28, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 28
-%base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
-%res = call <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
-ret <vscale x 16 x i32> %res
-}
-
-define <vscale x 16 x float> @ld4.nxv16f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
-; CHECK-LABEL: ld4.nxv16f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, #-32, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -32
-%base_ptr = bitcast <vscale x 4 x float>* %base to float *
-%res = call <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
-ret <vscale x 16 x float> %res
-}
-
-; ld4d
-define <vscale x 8 x i64> @ld4.nxv8i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
-; CHECK-LABEL: ld4.nxv8i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, #28, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 28
-%base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
-%res = call <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
-ret <vscale x 8 x i64> %res
-}
-
-define <vscale x 8 x double> @ld4.nxv8f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
-; CHECK-LABEL: ld4.nxv8f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, #-32, mul vl]
-; CHECK-NEXT:    ret
-%base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -32
-%base_ptr = bitcast <vscale x 2 x double>* %base to double *
-%res = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double * %base_ptr)
-ret <vscale x 8 x double> %res
-}
-
-declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
-declare <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
-declare <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
-
-declare <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
-declare <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
-declare <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
-
-declare <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
-declare <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
-declare <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
-
-; +bf16 is required for the bfloat version.
-attributes #0 = { "target-features"="+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll
deleted file mode 100644
index bf207c66d5332..0000000000000
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll
+++ /dev/null
@@ -1,285 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=sve < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=sme < %s | FileCheck %s
-
-; ld2b
-define <vscale x 32 x i8> @ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
-; CHECK-LABEL: ld2.nxv32i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i8, i8 *  %addr, i64 %a
-%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
-ret <vscale x 32 x i8> %res
-}
-
-; ld2h
-define <vscale x 16 x i16> @ld2.nxv16i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
-; CHECK-LABEL: ld2.nxv16i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i16, i16 *  %addr, i64 %a
-%res = call <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
-ret <vscale x 16 x i16> %res
-}
-
-define <vscale x 16 x half> @ld2.nxv16f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
-; CHECK-LABEL: ld2.nxv16f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr half, half *  %addr, i64 %a
-%res = call <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
-ret <vscale x 16 x half> %res
-}
-
-define <vscale x 16 x bfloat> @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
-; CHECK-LABEL: ld2.nxv16bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
-%res = call <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
-ret <vscale x 16 x bfloat> %res
-}
-
-; ld2w
-define <vscale x 8 x i32> @ld2.nxv8i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
-; CHECK-LABEL: ld2.nxv8i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i32, i32 *  %addr, i64 %a
-%res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
-ret <vscale x 8 x i32> %res
-}
-
-define <vscale x 8 x float> @ld2.nxv8f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
-; CHECK-LABEL: ld2.nxv8f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr float, float *  %addr, i64 %a
-%res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
-ret <vscale x 8 x float> %res
-}
-
-; ld2d
-define <vscale x 4 x i64> @ld2.nxv4i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
-; CHECK-LABEL: ld2.nxv4i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i64, i64 *  %addr, i64 %a
-%res = call <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
-ret <vscale x 4 x i64> %res
-}
-
-define <vscale x 4 x double> @ld2.nxv4f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
-; CHECK-LABEL: ld2.nxv4f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr double, double *  %addr, i64 %a
-%res = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
-ret <vscale x 4 x double> %res
-}
-
-; ld3b
-define <vscale x 48 x i8> @ld3.nxv48i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
-; CHECK-LABEL: ld3.nxv48i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i8, i8 *  %addr, i64 %a
-%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
-ret <vscale x 48 x i8> %res
-}
-
-; ld3h
-define <vscale x 24 x i16> @ld3.nxv24i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
-; CHECK-LABEL: ld3.nxv24i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i16, i16 *  %addr, i64 %a
-%res = call <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
-ret <vscale x 24 x i16> %res
-}
-
-define <vscale x 24 x half> @ld3.nxv24f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
-; CHECK-LABEL: ld3.nxv24f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr half, half *  %addr, i64 %a
-%res = call <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
-ret <vscale x 24 x half> %res
-}
-
-define <vscale x 24 x bfloat> @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
-; CHECK-LABEL: ld3.nxv24bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
-%res = call <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
-ret <vscale x 24 x bfloat> %res
-}
-
-; ld3w
-define <vscale x 12 x i32> @ld3.nxv12i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
-; CHECK-LABEL: ld3.nxv12i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i32, i32 *  %addr, i64 %a
-%res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
-ret <vscale x 12 x i32> %res
-}
-
-define <vscale x 12 x float> @ld3.nxv12f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
-; CHECK-LABEL: ld3.nxv12f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr float, float *  %addr, i64 %a
-%res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
-ret <vscale x 12 x float> %res
-}
-
-; ld3d
-define <vscale x 6 x i64> @ld3.nxv6i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
-; CHECK-LABEL: ld3.nxv6i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i64, i64 *  %addr, i64 %a
-%res = call <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
-ret <vscale x 6 x i64> %res
-}
-
-define <vscale x 6 x double> @ld3.nxv6f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
-; CHECK-LABEL: ld3.nxv6f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr double, double *  %addr, i64 %a
-%res = call <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
-ret <vscale x 6 x double> %res
-}
-
-; ld4b
-define <vscale x 64 x i8> @ld4.nxv64i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
-; CHECK-LABEL: ld4.nxv64i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i8, i8 *  %addr, i64 %a
-%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
-ret <vscale x 64 x i8> %res
-}
-
-; ld4h
-define <vscale x 32 x i16> @ld4.nxv32i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
-; CHECK-LABEL: ld4.nxv32i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i16, i16 *  %addr, i64 %a
-%res = call <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
-ret <vscale x 32 x i16> %res
-}
-
-define <vscale x 32 x half> @ld4.nxv32f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
-; CHECK-LABEL: ld4.nxv32f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr half, half *  %addr, i64 %a
-%res = call <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
-ret <vscale x 32 x half> %res
-}
-
-define <vscale x 32 x bfloat> @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
-; CHECK-LABEL: ld4.nxv32bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
-%res = call <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
-ret <vscale x 32 x bfloat> %res
-}
-
-; ld4w
-define <vscale x 16 x i32> @ld4.nxv16i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
-; CHECK-LABEL: ld4.nxv16i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i32, i32 *  %addr, i64 %a
-%res = call <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
-ret <vscale x 16 x i32> %res
-}
-
-define <vscale x 16 x float> @ld4.nxv16f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
-; CHECK-LABEL: ld4.nxv16f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr float, float *  %addr, i64 %a
-%res = call <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
-ret <vscale x 16 x float> %res
-}
-
-; ld4d
-define <vscale x 8 x i64> @ld4.nxv8i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
-; CHECK-LABEL: ld4.nxv8i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr i64, i64 *  %addr, i64 %a
-%res = call <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
-ret <vscale x 8 x i64> %res
-}
-
-define <vscale x 8 x double> @ld4.nxv8f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
-; CHECK-LABEL: ld4.nxv8f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
-; CHECK-NEXT:    ret
-%addr2 = getelementptr double, double *  %addr, i64 %a
-%res = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
-ret <vscale x 8 x double> %res
-}
-
-declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
-declare <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
-declare <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
-
-declare <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
-declare <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
-declare <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
-
-declare <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
-declare <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
-declare <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
-
-; +bf16 is required for the bfloat version.
-attributes #0 = { "target-features"="+bf16" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
index 423bebeb2bdc4..b3db0abc19670 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-loads.ll
@@ -603,270 +603,6 @@ define <vscale x 2 x double> @ldnt1d_f64(<vscale x 2 x i1> %pred, double* %addr)
   ret <vscale x 2 x double> %res
 }
 
-;
-; LD2B
-;
-
-define <vscale x 32 x i8> @ld2b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
-; CHECK-LABEL: ld2b_i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %pred, i8* %addr)
-  ret <vscale x 32 x i8> %res
-}
-
-;
-; LD2H
-;
-
-define <vscale x 16 x i16> @ld2h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
-; CHECK-LABEL: ld2h_i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %pred, i16* %addr)
-  ret <vscale x 16 x i16> %res
-}
-
-define <vscale x 16 x half> @ld2h_f16(<vscale x 8 x i1> %pred, half* %addr) {
-; CHECK-LABEL: ld2h_f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %pred, half* %addr)
-  ret <vscale x 16 x half> %res
-}
-
-define <vscale x 16 x bfloat> @ld2h_bf16(<vscale x 8 x i1> %pred, bfloat* %addr) {
-; CHECK-LABEL: ld2h_bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %pred, bfloat* %addr)
-  ret <vscale x 16 x bfloat> %res
-}
-
-;
-; LD2W
-;
-
-define <vscale x 8 x i32> @ld2w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
-; CHECK-LABEL: ld2w_i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %pred, i32* %addr)
-  ret <vscale x 8 x i32> %res
-}
-
-define <vscale x 8 x float> @ld2w_f32(<vscale x 4 x i1> %pred, float* %addr) {
-; CHECK-LABEL: ld2w_f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %pred, float* %addr)
-  ret <vscale x 8 x float> %res
-}
-
-;
-; LD2D
-;
-
-define <vscale x 4 x i64> @ld2d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
-; CHECK-LABEL: ld2d_i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %pred, i64* %addr)
-  ret <vscale x 4 x i64> %res
-}
-
-define <vscale x 4 x double> @ld2d_f64(<vscale x 2 x i1> %pred, double* %addr) {
-; CHECK-LABEL: ld2d_f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %pred, double* %addr)
-  ret <vscale x 4 x double> %res
-}
-
-;
-; LD3B
-;
-
-define <vscale x 48 x i8> @ld3b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
-; CHECK-LABEL: ld3b_i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %pred, i8* %addr)
-  ret <vscale x 48 x i8> %res
-}
-
-;
-; LD3H
-;
-
-define <vscale x 24 x i16> @ld3h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
-; CHECK-LABEL: ld3h_i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %pred, i16* %addr)
-  ret <vscale x 24 x i16> %res
-}
-
-define <vscale x 24 x half> @ld3h_f16(<vscale x 8 x i1> %pred, half* %addr) {
-; CHECK-LABEL: ld3h_f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %pred, half* %addr)
-  ret <vscale x 24 x half> %res
-}
-
-define <vscale x 24 x bfloat> @ld3h_bf16(<vscale x 8 x i1> %pred, bfloat* %addr) {
-; CHECK-LABEL: ld3h_bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %pred, bfloat* %addr)
-  ret <vscale x 24 x bfloat> %res
-}
-
-;
-; LD3W
-;
-
-define <vscale x 12 x i32> @ld3w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
-; CHECK-LABEL: ld3w_i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %pred, i32* %addr)
-  ret <vscale x 12 x i32> %res
-}
-
-define <vscale x 12 x float> @ld3w_f32(<vscale x 4 x i1> %pred, float* %addr) {
-; CHECK-LABEL: ld3w_f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %pred, float* %addr)
-  ret <vscale x 12 x float> %res
-}
-
-;
-; LD3D
-;
-
-define <vscale x 6 x i64> @ld3d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
-; CHECK-LABEL: ld3d_i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %pred, i64* %addr)
-  ret <vscale x 6 x i64> %res
-}
-
-define <vscale x 6 x double> @ld3d_f64(<vscale x 2 x i1> %pred, double* %addr) {
-; CHECK-LABEL: ld3d_f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %pred, double* %addr)
-  ret <vscale x 6 x double> %res
-}
-
-;
-; LD4B
-;
-
-define <vscale x 64 x i8> @ld4b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
-; CHECK-LABEL: ld4b_i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %pred, i8* %addr)
-  ret <vscale x 64 x i8> %res
-}
-
-;
-; LD4H
-;
-
-define <vscale x 32 x i16> @ld4h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
-; CHECK-LABEL: ld4h_i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1> %pred, i16* %addr)
-  ret <vscale x 32 x i16> %res
-}
-
-define <vscale x 32 x half> @ld4h_f16(<vscale x 8 x i1> %pred, half* %addr) {
-; CHECK-LABEL: ld4h_f16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1> %pred, half* %addr)
-  ret <vscale x 32 x half> %res
-}
-
-define <vscale x 32 x bfloat> @ld4h_bf16(<vscale x 8 x i1> %pred, bfloat* %addr) {
-; CHECK-LABEL: ld4h_bf16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %pred, bfloat* %addr)
-  ret <vscale x 32 x bfloat> %res
-}
-
-;
-; LD4W
-;
-
-define <vscale x 16 x i32> @ld4w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
-; CHECK-LABEL: ld4w_i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %pred, i32* %addr)
-  ret <vscale x 16 x i32> %res
-}
-
-define <vscale x 16 x float> @ld4w_f32(<vscale x 4 x i1> %pred, float* %addr) {
-; CHECK-LABEL: ld4w_f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %pred, float* %addr)
-  ret <vscale x 16 x float> %res
-}
-
-;
-; LD4D
-;
-
-define <vscale x 8 x i64> @ld4d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
-; CHECK-LABEL: ld4d_i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1> %pred, i64* %addr)
-  ret <vscale x 8 x i64> %res
-}
-
-define <vscale x 8 x double> @ld4d_f64(<vscale x 2 x i1> %pred, double* %addr) {
-; CHECK-LABEL: ld4d_f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0]
-; CHECK-NEXT:    ret
-  %res = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1> %pred, double* %addr)
-  ret <vscale x 8 x double> %res
-}
-
 
 declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1>, i8*)
 declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1>, i16*)
@@ -886,33 +622,6 @@ declare <vscale x 8 x bfloat> @llvm.aarch64.sve.ldnt1.nxv8bf16(<vscale x 8 x i1>
 declare <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.nxv4f32(<vscale x 4 x i1>, float*)
 declare <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.nxv2f64(<vscale x 2 x i1>, double*)
 
-declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
-declare <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
-declare <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
-
-declare <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
-declare <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
-declare <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
-
-declare <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
-declare <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
-declare <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
-declare <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
-declare <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
-declare <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
-declare <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
-declare <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
-
 declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
 declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double>, <2 x double>, i64)
 declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)

diff  --git a/llvm/test/CodeGen/AArch64/sve-merging-stores.ll b/llvm/test/CodeGen/AArch64/sve-merging-stores.ll
index 66a526f5c0d56..06c6ff1a6e522 100644
--- a/llvm/test/CodeGen/AArch64/sve-merging-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-merging-stores.ll
@@ -3,10 +3,7 @@
 %complex = type { { double, double } }
 
 ; Function Attrs: argmemonly nounwind readonly
-declare <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64(<vscale x 4 x double>, i32 immarg) #3
-
-; Function Attrs: argmemonly nounwind readonly
-declare <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1(<vscale x 2 x i1>, double*) #3
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1>, double*) #3
 
 ; Function Attrs: nounwind readnone
 declare double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>) #2
@@ -20,10 +17,10 @@ define void @foo1(%complex* %outval, <vscale x 2 x i1> %pred, double *%inptr) {
 ; CHECK-NEXT: str q2, [x0]
   %realp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 0
   %imagp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 1
-  %1 = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1(<vscale x 2 x i1> %pred, double* nonnull %inptr)
-  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64(<vscale x 4 x double> %1, i32 0)
+  %1 = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1> %pred, double* nonnull %inptr)
+  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 0
   %3 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %2)
-  %4 = call <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64(<vscale x 4 x double> %1, i32 1)
+  %4 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 1
   %5 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %4)
   store double %3, double* %realp, align 8
   store double %5, double* %imagp, align 8
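
For reference, a minimal sketch of the pattern the updated sve-merging-stores.ll test above now uses: the ld2 result is returned as a struct of two scalable vectors and unpacked with extractvalue instead of llvm.aarch64.sve.tuple.get. The wrapper function name @ld2_first_field is invented purely for illustration; the intrinsic declaration matches the one in the hunk above.

; Old form (removed): one wide tuple vector, unpacked via tuple.get.
;   %tuple = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1(<vscale x 2 x i1> %pg, double* %addr)
;   %v0    = call <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64(<vscale x 4 x double> %tuple, i32 0)

; New form: a struct of two scalable vectors, unpacked via extractvalue.
declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1>, double*)

define <vscale x 2 x double> @ld2_first_field(<vscale x 2 x i1> %pg, double* %addr) {
  ; Load two consecutive structured vectors under the predicate %pg.
  %pair = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv2f64(<vscale x 2 x i1> %pg, double* %addr)
  ; Take the first field of the returned struct (previously tuple.get with index 0).
  %v0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %pair, 0
  ret <vscale x 2 x double> %v0
}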


        

