[llvm] cfe22cd - [AArch64][SVE] Add new ld<n> intrinsics that return a struct of vscale types

Bradley Smith via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 22 07:43:08 PDT 2021


Author: Bradley Smith
Date: 2021-10-22T14:13:17Z
New Revision: cfe22cd4ef9d4ab48b85e96711e11113ce85e9f7

URL: https://github.com/llvm/llvm-project/commit/cfe22cd4ef9d4ab48b85e96711e11113ce85e9f7
DIFF: https://github.com/llvm/llvm-project/commit/cfe22cd4ef9d4ab48b85e96711e11113ce85e9f7.diff

LOG: [AArch64][SVE] Add new ld<n> intrinsics that return a struct of vscale types

This will allow us to reuse the existing interleaved-load logic in
lowerInterleavedLoad that exists for NEON types, but for SVE
fixed-length types.

The goal eventually will be to replace the existing ld<n> intrinsics
with these, once a migration path has been sorted out.
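For illustration, a minimal sketch of the difference for the ld2 byte
variant (the new declaration is taken from the tests below; the existing
intrinsic's mangled name follows the pre-existing convention, and %pg /
%ptr are placeholder values):

  ; existing form: one concatenated wide vector holding both parts
  %ld2 = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %pg, i8* %ptr)

  ; new form: a struct of two scalable vectors, one per loaded register
  %ld2.sret = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %pg, i8* %ptr)
  %part0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %ld2.sret, 0
  %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %ld2.sret, 1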

Differential Revision: https://reviews.llvm.org/D112078

Added: 
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index f05d4c01d010f..c586af45f34d2 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -962,6 +962,25 @@ let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
                  LLVMPointerToElt<0>],
                 [IntrReadMem, IntrArgMemOnly]>;
 
+  class AdvSIMD_2Vec_PredLoad_Intrinsic
+    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                 LLVMPointerToElt<0>],
+                [IntrReadMem, IntrArgMemOnly]>;
+
+  class AdvSIMD_3Vec_PredLoad_Intrinsic
+    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                 LLVMPointerToElt<0>],
+                [IntrReadMem, IntrArgMemOnly]>;
+
+  class AdvSIMD_4Vec_PredLoad_Intrinsic
+    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
+                 LLVMMatchType<0>],
+                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                 LLVMPointerToElt<0>],
+                [IntrReadMem, IntrArgMemOnly]>;
+
   class AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic
     : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
@@ -1535,6 +1554,10 @@ def int_aarch64_sve_ld2 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
 def int_aarch64_sve_ld3 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
 def int_aarch64_sve_ld4 : AdvSIMD_ManyVec_PredLoad_Intrinsic;
 
+def int_aarch64_sve_ld2_sret : AdvSIMD_2Vec_PredLoad_Intrinsic;
+def int_aarch64_sve_ld3_sret : AdvSIMD_3Vec_PredLoad_Intrinsic;
+def int_aarch64_sve_ld4_sret : AdvSIMD_4Vec_PredLoad_Intrinsic;
+
 def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;
 def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;

diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 241b86a26c0d0..7fe1ecaa68bbe 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -286,7 +286,8 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
   void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
-                            unsigned Opc_rr, unsigned Opc_ri);
+                            unsigned Opc_rr, unsigned Opc_ri,
+                            bool IsIntr = false);
 
   bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
   /// SVE Reg+Imm addressing mode.
@@ -1487,7 +1488,7 @@ AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
 
 void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
                                                unsigned Scale, unsigned Opc_ri,
-                                               unsigned Opc_rr) {
+                                               unsigned Opc_rr, bool IsIntr) {
   assert(Scale < 4 && "Invalid scaling value.");
   SDLoc DL(N);
   EVT VT = N->getValueType(0);
@@ -1497,11 +1498,11 @@ void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
   SDValue Base, Offset;
   unsigned Opc;
   std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
-      N, Opc_rr, Opc_ri, N->getOperand(2),
+      N, Opc_rr, Opc_ri, N->getOperand(IsIntr ? 3 : 2),
       CurDAG->getTargetConstant(0, DL, MVT::i64), Scale);
 
-  SDValue Ops[] = {N->getOperand(1), // Predicate
-                   Base,             // Memory operand
+  SDValue Ops[] = {N->getOperand(IsIntr ? 2 : 1), // Predicate
+                   Base,                          // Memory operand
                    Offset, Chain};
 
   const EVT ResTys[] = {MVT::Untyped, MVT::Other};
@@ -3894,6 +3895,69 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
     case Intrinsic::aarch64_ld64b:
       SelectLoad(Node, 8, AArch64::LD64B, AArch64::x8sub_0);
       return;
+    case Intrinsic::aarch64_sve_ld2_sret: {
+      if (VT == MVT::nxv16i8) {
+        SelectPredicatedLoad(Node, 2, 0, AArch64::LD2B_IMM, AArch64::LD2B,
+                             true);
+        return;
+      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
+                 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
+        SelectPredicatedLoad(Node, 2, 1, AArch64::LD2H_IMM, AArch64::LD2H,
+                             true);
+        return;
+      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
+        SelectPredicatedLoad(Node, 2, 2, AArch64::LD2W_IMM, AArch64::LD2W,
+                             true);
+        return;
+      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
+        SelectPredicatedLoad(Node, 2, 3, AArch64::LD2D_IMM, AArch64::LD2D,
+                             true);
+        return;
+      }
+      break;
+    }
+    case Intrinsic::aarch64_sve_ld3_sret: {
+      if (VT == MVT::nxv16i8) {
+        SelectPredicatedLoad(Node, 3, 0, AArch64::LD3B_IMM, AArch64::LD3B,
+                             true);
+        return;
+      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
+                 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
+        SelectPredicatedLoad(Node, 3, 1, AArch64::LD3H_IMM, AArch64::LD3H,
+                             true);
+        return;
+      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
+        SelectPredicatedLoad(Node, 3, 2, AArch64::LD3W_IMM, AArch64::LD3W,
+                             true);
+        return;
+      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
+        SelectPredicatedLoad(Node, 3, 3, AArch64::LD3D_IMM, AArch64::LD3D,
+                             true);
+        return;
+      }
+      break;
+    }
+    case Intrinsic::aarch64_sve_ld4_sret: {
+      if (VT == MVT::nxv16i8) {
+        SelectPredicatedLoad(Node, 4, 0, AArch64::LD4B_IMM, AArch64::LD4B,
+                             true);
+        return;
+      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
+                 (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
+        SelectPredicatedLoad(Node, 4, 1, AArch64::LD4H_IMM, AArch64::LD4H,
+                             true);
+        return;
+      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
+        SelectPredicatedLoad(Node, 4, 2, AArch64::LD4W_IMM, AArch64::LD4W,
+                             true);
+        return;
+      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
+        SelectPredicatedLoad(Node, 4, 3, AArch64::LD4D_IMM, AArch64::LD4D,
+                             true);
+        return;
+      }
+      break;
+    }
     }
   } break;
   case ISD::INTRINSIC_WO_CHAIN: {

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
new file mode 100644
index 0000000000000..c056cdf3bbb61
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
@@ -0,0 +1,568 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=sve < %s | FileCheck %s
+
+; NOTE: invalid, upper and lower bound immediate values of the reg+imm
+; addressing mode are checked only for the byte version of each
+; instruction (`ld<N>b`), as the code for detecting the immediate is
+; common to all instructions, and varies only with the number of
+; elements of the structure loaded, which is <N> = 2, 3, 4.
+
+; ld2b
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #2
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 2
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-16
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #14
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_not_multiple_of_2(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_not_multiple_of_2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #3
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_outside_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-18
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -18
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_outside_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #16
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 16
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld2h
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16>* %addr) {
+; CHECK-LABEL: ld2.nxv16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #14
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 14
+  %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half>* %addr) {
+; CHECK-LABEL: ld2.nxv16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-16
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 -16
+  %base_ptr = bitcast <vscale x 8 x half>* %base to half *
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+  ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat>* %addr) #0 {
+; CHECK-LABEL: ld2.nxv16bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #12
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 12
+  %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld2w
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32>* %addr) {
+; CHECK-LABEL: ld2.nxv8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #14
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 14
+  %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
+; CHECK-LABEL: ld2.nxv8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-16
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -16
+  %base_ptr = bitcast <vscale x 4 x float>* %base to float *
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+  ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld2d
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64>* %addr) {
+; CHECK-LABEL: ld2.nxv4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #14
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 14
+  %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double>* %addr) {
+; CHECK-LABEL: ld2.nxv4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-16
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -16
+  %base_ptr = bitcast <vscale x 2 x double>* %base to double *
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
+  ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; ld3b
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #3
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-24
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #21
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_not_multiple_of_3_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_01:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #4
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_not_multiple_of_3_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_02:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #5
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_outside_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-27
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -27
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_outside_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #24
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 24
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld3h
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
+; CHECK-LABEL: ld3.nxv24i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #21
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 21
+  %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nxv24f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
+; CHECK-LABEL: ld3.nxv24f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #21
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 21
+  %base_ptr = bitcast <vscale x 8 x half>* %base to half *
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+  ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
+; CHECK-LABEL: ld3.nxv24bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-24
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -24
+  %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld3w
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
+; CHECK-LABEL: ld3.nxv12i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #21
+; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 21
+  %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3.nxv12f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float> *%addr) {
+; CHECK-LABEL: ld3.nxv12f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-24
+; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -24
+  %base_ptr = bitcast <vscale x 4 x float>* %base to float *
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+  ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld3d
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
+; CHECK-LABEL: ld3.nxv6i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #21
+; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 21
+  %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3.nxv6f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
+; CHECK-LABEL: ld3.nxv6f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-24
+; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -24
+  %base_ptr = bitcast <vscale x 2 x double>* %base to double *
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
+  ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; ld4b
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #4
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #-32
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #28
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_01:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #5
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_02:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #6
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 6
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_not_multiple_of_4_03(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_03:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #7
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 7
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_outside_lower_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    mov x9, #-576
+; CHECK-NEXT:    lsr x8, x8, #4
+; CHECK-NEXT:    mul x8, x8, x9
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #4) #9)
+; xM = -9 * 2^6
+; xP = RDVL * 2^-4
+; xOFFSET = RDVL * 2^-4 * -9 * 2^6 = RDVL * -36
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -36
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_outside_upper_bound:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdvl x8, #1
+; CHECK-NEXT:    mov w9, #512
+; CHECK-NEXT:    lsr x8, x8, #4
+; CHECK-NEXT:    mul x8, x8, x9
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ret
+; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #16) #2)
+; xM = 2^9
+; xP = RDVL * 2^-4
+; xOFFSET = RDVL * 2^-4 * 2^9 = RDVL * 32
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 32
+  %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld4h
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4.nxv32i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
+; CHECK-LABEL: ld4.nxv32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #8
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 8
+  %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4.nxv32f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
+; CHECK-LABEL: ld4.nxv32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #28
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 28
+  %base_ptr = bitcast <vscale x 8 x half>* %base to half *
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+  ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
+; CHECK-LABEL: ld4.nxv32bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-32
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -32
+  %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld4w
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4.nxv16i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
+; CHECK-LABEL: ld4.nxv16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #28
+; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 28
+  %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4.nxv16f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
+; CHECK-LABEL: ld4.nxv16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-32
+; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -32
+  %base_ptr = bitcast <vscale x 4 x float>* %base to float *
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+  ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld4d
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4.nxv8i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
+; CHECK-LABEL: ld4.nxv8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #28
+; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 28
+  %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4.nxv8f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
+; CHECK-LABEL: ld4.nxv8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    addvl x8, x0, #-32
+; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x8]
+; CHECK-NEXT:    ret
+  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -32
+  %base_ptr = bitcast <vscale x 2 x double>* %base to double *
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double * %base_ptr)
+  ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll
new file mode 100644
index 0000000000000..3eb68c0edfe16
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+reg-addr-mode.ll
@@ -0,0 +1,284 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=sve < %s | FileCheck %s
+
+; ld2b
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i8, i8 *  %addr, i64 %a
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld2h
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i16, i16 *  %addr, i64 %a
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr half, half *  %addr, i64 %a
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
+  ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+; CHECK-LABEL: ld2.nxv16bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld2w
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i32, i32 *  %addr, i64 %a
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr float, float *  %addr, i64 %a
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
+  ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld2d
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i64, i64 *  %addr, i64 %a
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr double, double *  %addr, i64 %a
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
+  ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; ld3b
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv48i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i8, i8 *  %addr, i64 %a
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld3h
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv24i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i16, i16 *  %addr, i64 %a
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nxv24f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv24f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr half, half *  %addr, i64 %a
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
+  ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+; CHECK-LABEL: ld3.nxv24bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld3w
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv12i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i32, i32 *  %addr, i64 %a
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3.nxv12f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv12f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr float, float *  %addr, i64 %a
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
+  ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld3d
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv6i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i64, i64 *  %addr, i64 %a
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3.nxv6f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv6f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr double, double *  %addr, i64 %a
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
+  ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+; ld4b
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i8, i8 *  %addr, i64 %a
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+; ld4h
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4.nxv32i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i16, i16 *  %addr, i64 %a
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4.nxv32f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr half, half *  %addr, i64 %a
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
+  ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+; CHECK-LABEL: ld4.nxv32bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
+; ld4w
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4.nxv16i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i32, i32 *  %addr, i64 %a
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4.nxv16f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr float, float *  %addr, i64 %a
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
+  ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
+; ld4d
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4.nxv8i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr i64, i64 *  %addr, i64 %a
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4.nxv8f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ret
+  %addr2 = getelementptr double, double *  %addr, i64 %a
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
+  ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld2.sret.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld2.sret.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld2.sret.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld2.sret.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld2.sret.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld2.sret.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld2.sret.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld3.sret.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld3.sret.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld3.sret.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld3.sret.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld3.sret.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld3.sret.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld4.sret.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld4.sret.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld4.sret.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld4.sret.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld4.sret.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld4.sret.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld4.sret.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
