[llvm] adb28e0 - [llvm][CodeGen] Addressing modes for SVE ldN.

Francesco Petrogalli via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 27 15:29:45 PDT 2020


Author: Francesco Petrogalli
Date: 2020-07-27T22:18:28Z
New Revision: adb28e0fb2b0e97ea9dce422c09b36979cf7cd2f

URL: https://github.com/llvm/llvm-project/commit/adb28e0fb2b0e97ea9dce422c09b36979cf7cd2f
DIFF: https://github.com/llvm/llvm-project/commit/adb28e0fb2b0e97ea9dce422c09b36979cf7cd2f.diff

LOG: [llvm][CodeGen] Addressing modes for SVE ldN.

Reviewers: c-rhodes, efriedma, sdesmalen

Subscribers: huihuiz, tschuett, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77251
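
This change routes the SVE structured loads through findAddrModeSVELoadStore
so that selection can choose between the reg+imm (Opc_ri) and reg+reg
(Opc_rr) forms of ld<N>. A minimal sketch of the immediate legality rule the
new tests exercise, assuming a plain-integer view of the VL-scaled offset
(canUseRegImmForm is a hypothetical name; the in-tree logic works on
SDValues in AArch64ISelDAGToDAG.cpp):

#include <cstdint>

// For an SVE structured load ld<N>{b,h,w,d}, the reg+imm form
// [Xn, #imm, mul vl] takes a VL-scaled immediate that is a multiple of
// N and lies in [-8*N, 7*N]; anything else falls back to the reg+reg
// form.
static bool canUseRegImmForm(int64_t VLOffset, int64_t NumVecs) {
  return VLOffset % NumVecs == 0 && VLOffset >= -8 * NumVecs &&
         VLOffset <= 7 * NumVecs;
}

For ld2 this gives the [-16, 14] window checked in the reg+imm tests below;
an offset of 3 VLs is rejected (not a multiple of 2) and is materialized
with rdvl and the reg+reg form instead.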

Added: 
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index dbd7db7ee8e6..7799ebfbd68e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -245,7 +245,8 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
                          unsigned SubRegIdx);
   void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
-  void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, const unsigned Opc);
+  void SelectPredicatedLoad(SDNode *N, unsigned NumVecs, unsigned Scale,
+                            unsigned Opc_ri, unsigned Opc_rr);
 
   bool SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base, SDValue &OffImm);
   /// SVE Reg+Imm addressing mode.
@@ -1434,14 +1435,23 @@ AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
 }
 
 void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
-                                               const unsigned Opc) {
+                                               unsigned Scale, unsigned Opc_ri,
+                                               unsigned Opc_rr) {
+  assert(Scale < 4 && "Invalid scaling value.");
   SDLoc DL(N);
   EVT VT = N->getValueType(0);
   SDValue Chain = N->getOperand(0);
 
+  // Optimize addressing mode.
+  SDValue Base, Offset;
+  unsigned Opc;
+  std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore(
+      N, Opc_rr, Opc_ri, N->getOperand(2),
+      CurDAG->getTargetConstant(0, DL, MVT::i64), Scale);
+
   SDValue Ops[] = {N->getOperand(1), // Predicate
-                   N->getOperand(2), // Memory operand
-                   CurDAG->getTargetConstant(0, DL, MVT::i64), Chain};
+                   Base,             // Memory operand
+                   Offset, Chain};
 
   const EVT ResTys[] = {MVT::Untyped, MVT::Other};
 
@@ -4726,51 +4736,51 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
   }
   case AArch64ISD::SVE_LD2_MERGE_ZERO: {
     if (VT == MVT::nxv16i8) {
-      SelectPredicatedLoad(Node, 2, AArch64::LD2B_IMM);
+      SelectPredicatedLoad(Node, 2, 0, AArch64::LD2B_IMM, AArch64::LD2B);
       return;
     } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
                (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
-      SelectPredicatedLoad(Node, 2, AArch64::LD2H_IMM);
+      SelectPredicatedLoad(Node, 2, 1, AArch64::LD2H_IMM, AArch64::LD2H);
       return;
     } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-      SelectPredicatedLoad(Node, 2, AArch64::LD2W_IMM);
+      SelectPredicatedLoad(Node, 2, 2, AArch64::LD2W_IMM, AArch64::LD2W);
       return;
     } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-      SelectPredicatedLoad(Node, 2, AArch64::LD2D_IMM);
+      SelectPredicatedLoad(Node, 2, 3, AArch64::LD2D_IMM, AArch64::LD2D);
       return;
     }
     break;
   }
   case AArch64ISD::SVE_LD3_MERGE_ZERO: {
     if (VT == MVT::nxv16i8) {
-      SelectPredicatedLoad(Node, 3, AArch64::LD3B_IMM);
+      SelectPredicatedLoad(Node, 3, 0, AArch64::LD3B_IMM, AArch64::LD3B);
       return;
     } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
                (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
-      SelectPredicatedLoad(Node, 3, AArch64::LD3H_IMM);
+      SelectPredicatedLoad(Node, 3, 1, AArch64::LD3H_IMM, AArch64::LD3H);
       return;
     } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-      SelectPredicatedLoad(Node, 3, AArch64::LD3W_IMM);
+      SelectPredicatedLoad(Node, 3, 2, AArch64::LD3W_IMM, AArch64::LD3W);
       return;
     } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-      SelectPredicatedLoad(Node, 3, AArch64::LD3D_IMM);
+      SelectPredicatedLoad(Node, 3, 3, AArch64::LD3D_IMM, AArch64::LD3D);
       return;
     }
     break;
   }
   case AArch64ISD::SVE_LD4_MERGE_ZERO: {
     if (VT == MVT::nxv16i8) {
-      SelectPredicatedLoad(Node, 4, AArch64::LD4B_IMM);
+      SelectPredicatedLoad(Node, 4, 0, AArch64::LD4B_IMM, AArch64::LD4B);
       return;
     } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16 ||
                (VT == MVT::nxv8bf16 && Subtarget->hasBF16())) {
-      SelectPredicatedLoad(Node, 4, AArch64::LD4H_IMM);
+      SelectPredicatedLoad(Node, 4, 1, AArch64::LD4H_IMM, AArch64::LD4H);
       return;
     } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-      SelectPredicatedLoad(Node, 4, AArch64::LD4W_IMM);
+      SelectPredicatedLoad(Node, 4, 2, AArch64::LD4W_IMM, AArch64::LD4W);
       return;
     } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-      SelectPredicatedLoad(Node, 4, AArch64::LD4D_IMM);
+      SelectPredicatedLoad(Node, 4, 3, AArch64::LD4D_IMM, AArch64::LD4D);
       return;
     }
     break;
@@ -4790,10 +4800,14 @@ FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
 
 /// When \p PredVT is a scalable vector predicate in the form
 /// MVT::nx<M>xi1, it builds the correspondent scalable vector of
-/// integers MVT::nx<M>xi<bits> s.t. M x bits = 128. If the input
+/// integers MVT::nx<M>xi<bits> s.t. M x bits = 128. When targeting
+/// structured vectors (NumVec > 1), the output data type is
+/// MVT::nx<M*NumVec>xi<bits> s.t. M x bits = 128. If the input
 /// PredVT is not in the form MVT::nx<M>xi1, it returns an invalid
 /// EVT.
-static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT) {
+static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
+                                                unsigned NumVec) {
+  assert(NumVec > 0 && NumVec < 5 && "Invalid number of vectors.");
   if (!PredVT.isScalableVector() || PredVT.getVectorElementType() != MVT::i1)
     return EVT();
 
@@ -4803,7 +4817,8 @@ static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT) {
 
   ElementCount EC = PredVT.getVectorElementCount();
   EVT ScalarVT = EVT::getIntegerVT(Ctx, AArch64::SVEBitsPerBlock / EC.Min);
-  EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, EC);
+  EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, EC * NumVec);
+
   return MemVT;
 }
 
@@ -4827,6 +4842,15 @@ static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
     return cast<VTSDNode>(Root->getOperand(3))->getVT();
   case AArch64ISD::ST1_PRED:
     return cast<VTSDNode>(Root->getOperand(4))->getVT();
+  case AArch64ISD::SVE_LD2_MERGE_ZERO:
+    return getPackedVectorTypeFromPredicateType(
+        Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/2);
+  case AArch64ISD::SVE_LD3_MERGE_ZERO:
+    return getPackedVectorTypeFromPredicateType(
+        Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/3);
+  case AArch64ISD::SVE_LD4_MERGE_ZERO:
+    return getPackedVectorTypeFromPredicateType(
+        Ctx, Root->getOperand(1)->getValueType(0), /*NumVec=*/4);
   default:
     break;
   }
@@ -4842,7 +4866,7 @@ static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
   // We are using an SVE prefetch intrinsic. Type must be inferred
   // from the width of the predicate.
   return getPackedVectorTypeFromPredicateType(
-      Ctx, Root->getOperand(2)->getValueType(0));
+      Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/1);
 }
 
 /// SelectAddrModeIndexedSVE - Attempt selection of the addressing mode:
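
For reference, getPackedVectorTypeFromPredicateType now scales the element
count of the packed type by NumVec, so getMemVTFromNode reports the memory
footprint of the whole structure rather than of a single vector. A
stand-alone model of the computation, assuming types can be reduced to
(element count, element bits) pairs (packedMemVT is a hypothetical helper;
the real code uses EVT and ElementCount):

#include <cassert>
#include <utility>

// Given a predicate <vscale x M x i1>, the packed memory type is
// <vscale x M*NumVec x iB> with M * B == 128 (SVEBitsPerBlock).
static std::pair<unsigned, unsigned> packedMemVT(unsigned M,
                                                 unsigned NumVec) {
  assert(NumVec > 0 && NumVec < 5 && "Invalid number of vectors.");
  unsigned Bits = 128 / M; // element width such that M * Bits == 128
  return {M * NumVec, Bits};
}

For example, packedMemVT(8, 3) yields (24, 16): an nxv8i1 predicate on an
SVE_LD3_MERGE_ZERO node gives the memory type nxv24i16, matching the ld3h
tests added below.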

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll
new file mode 100644
index 000000000000..1ffa78ec2735
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+imm-addr-mode.ll
@@ -0,0 +1,495 @@
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=sve < %s | FileCheck %s
+
+; NOTE: invalid, upper-bound, and lower-bound immediate values of the
+; reg+imm addressing mode are checked only for the byte version of each
+; instruction (`ld<N>b`), as the code for detecting the immediate is
+; common to all instructions, and varies only with the number of
+; vectors in the structured load, which is <N> = 2, 3, 4.
+
+; ld2b
+define <vscale x 32 x i8> @ld2.nxv32i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8:
+; CHECK: ld2b { z0.b, z1.b }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 2
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 32 x i8> @ld2.nxv32i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_lower_bound:
+; CHECK: ld2b { z0.b, z1.b }, p0/z, [x0, #-16, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 32 x i8> @ld2.nxv32i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_upper_bound:
+; CHECK: ld2b { z0.b, z1.b }, p0/z, [x0, #14, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 32 x i8> @ld2.nxv32i8_not_multiple_of_2(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_not_multiple_of_2:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #3
+; CHECK-NEXT: ld2b { z0.b, z1.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 32 x i8> @ld2.nxv32i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_outside_lower_bound:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #-18
+; CHECK-NEXT: ld2b { z0.b, z1.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -18
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 32 x i8> @ld2.nxv32i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld2.nxv32i8_outside_upper_bound:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #16
+; CHECK-NEXT: ld2b { z0.b, z1.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 16
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 32 x i8> %res
+}
+
+; ld2h
+define <vscale x 16 x i16> @ld2.nxv16i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16>* %addr) {
+; CHECK-LABEL: ld2.nxv16i16:
+; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0, #14, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 14
+%base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
+%res = call <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 16 x half> @ld2.nxv16f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half>* %addr) {
+; CHECK-LABEL: ld2.nxv16f16:
+; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0, #-16, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 -16
+%base_ptr = bitcast <vscale x 8 x half>* %base to half *
+%res = call <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+ret <vscale x 16 x half> %res
+}
+
+define <vscale x 16 x bfloat> @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat>* %addr) #0 {
+; CHECK-LABEL: ld2.nxv16bf16:
+; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0, #12, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 12
+%base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
+%res = call <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+ret <vscale x 16 x bfloat> %res
+}
+
+; ld2w
+define <vscale x 8 x i32> @ld2.nxv8i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32>* %addr) {
+; CHECK-LABEL: ld2.nxv8i32:
+; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0, #14, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 14
+%base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
+%res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 8 x float> @ld2.nxv8f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
+; CHECK-LABEL: ld2.nxv8f32:
+; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0, #-16, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -16
+%base_ptr = bitcast <vscale x 4 x float>* %base to float *
+%res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+ret <vscale x 8 x float> %res
+}
+
+; ld2d
+define <vscale x 4 x i64> @ld2.nxv4i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64>* %addr) {
+; CHECK-LABEL: ld2.nxv4i64:
+; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0, #14, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 14
+%base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
+%res = call <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x double> @ld2.nxv4f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double>* %addr) {
+; CHECK-LABEL: ld2.nxv4f64:
+; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0, #-16, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -16
+%base_ptr = bitcast <vscale x 2 x double>* %base to double *
+%res = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
+ret <vscale x 4 x double> %res
+}
+
+; ld3b
+define <vscale x 48 x i8> @ld3.nxv48i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8:
+; CHECK: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 48 x i8> %res
+}
+
+define <vscale x 48 x i8> @ld3.nxv48i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_lower_bound:
+; CHECK: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, #-24, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 48 x i8> %res
+}
+
+define <vscale x 48 x i8> @ld3.nxv48i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_upper_bound:
+; CHECK: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, #21, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 48 x i8> %res
+}
+
+define <vscale x 48 x i8> @ld3.nxv48i8_not_multiple_of_3_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_01:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #4
+; CHECK-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 48 x i8> %res
+}
+
+define <vscale x 48 x i8> @ld3.nxv48i8_not_multiple_of_3_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_not_multiple_of_3_02:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #5
+; CHECK-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 48 x i8> %res
+}
+
+define <vscale x 48 x i8> @ld3.nxv48i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_outside_lower_bound:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #-27
+; CHECK-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -27
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 48 x i8> %res
+}
+
+define <vscale x 48 x i8> @ld3.nxv48i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld3.nxv48i8_outside_upper_bound:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #24
+; CHECK-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 24
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 48 x i8> %res
+}
+
+; ld3h
+define <vscale x 24 x i16> @ld3.nxv24i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
+; CHECK-LABEL: ld3.nxv24i16:
+; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, #21, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 21
+%base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
+%res = call <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+ret <vscale x 24 x i16> %res
+}
+
+define <vscale x 24 x half> @ld3.nxv24f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
+; CHECK-LABEL: ld3.nxv24f16:
+; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, #21, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 21
+%base_ptr = bitcast <vscale x 8 x half>* %base to half *
+%res = call <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+ret <vscale x 24 x half> %res
+}
+
+define <vscale x 24 x bfloat> @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
+; CHECK-LABEL: ld3.nxv24bf16:
+; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, #-24, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -24
+%base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
+%res = call <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+ret <vscale x 24 x bfloat> %res
+}
+
+; ld3w
+define <vscale x 12 x i32> @ld3.nxv12i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
+; CHECK-LABEL: ld3.nxv12i32:
+; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, #21, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 21
+%base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
+%res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+ret <vscale x 12 x i32> %res
+}
+
+define <vscale x 12 x float> @ld3.nxv12f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float> *%addr) {
+; CHECK-LABEL: ld3.nxv12f32:
+; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, #-24, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -24
+%base_ptr = bitcast <vscale x 4 x float>* %base to float *
+%res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+ret <vscale x 12 x float> %res
+}
+
+; ld3d
+define <vscale x 6 x i64> @ld3.nxv6i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
+; CHECK-LABEL: ld3.nxv6i64:
+; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, #21, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 21
+%base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
+%res = call <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+ret <vscale x 6 x i64> %res
+}
+
+define <vscale x 6 x double> @ld3.nxv6f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
+; CHECK-LABEL: ld3.nxv6f64:
+; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, #-24, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -24
+%base_ptr = bitcast <vscale x 2 x double>* %base to double *
+%res = call <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%base_ptr)
+ret <vscale x 6 x double> %res
+}
+
+; ld4b
+define <vscale x 64 x i8> @ld4.nxv64i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8:
+; CHECK: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_lower_bound:
+; CHECK: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, #-32, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @ld4.nxv64i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_upper_bound:
+; CHECK: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, #28, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @ld4.nxv64i8_not_multiple_of_4_01(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_01:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #5
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @ld4.nxv64i8_not_multiple_of_4_02(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_02:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #6
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 6
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @ld4.nxv64i8_not_multiple_of_4_03(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_not_multiple_of_4_03:
+; CHECK:      rdvl x[[OFFSET:[0-9]]], #7
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 7
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @ld4.nxv64i8_outside_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_outside_lower_bound:
+; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #4) #-9)
+; xM = -9 * 2^6
+; xP = RDVL * 2^-4
+; xOFFSET = RDVL * 2^-4 * -9 * 2^6 = RDVL * -36
+; CHECK:      rdvl x[[N:[0-9]]], #1
+; CHECK-DAG:  mov  x[[M:[0-9]]], #-576
+; CHECK-DAG:  lsr  x[[P:[0-9]]], x[[N]], #4
+; CHECK-DAG:  mul  x[[OFFSET:[0-9]]], x[[P]], x[[M]]
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -36
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @ld4.nxv64i8_outside_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
+; CHECK-LABEL: ld4.nxv64i8_outside_upper_bound:
+; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #16) #2)
+; xM = 2^9
+; xP = RDVL * 2^-4
+; xOFFSET = RDVL * 2^-4 * 2^9 = RDVL * 32
+; CHECK:      rdvl x[[N:[0-9]]], #1
+; CHECK-DAG:  mov  w[[M:[0-9]]], #512
+; CHECK-DAG:  lsr  x[[P:[0-9]]], x[[N]], #4
+; CHECK-DAG:  mul  x[[OFFSET:[0-9]]], x[[P]], x[[M]]
+; CHECK-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 32
+%base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
+ret <vscale x 64 x i8> %res
+}
+
+; ld4h
+define <vscale x 32 x i16> @ld4.nxv32i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
+; CHECK-LABEL: ld4.nxv32i16:
+; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, #8, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 8
+%base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
+%res = call <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%base_ptr)
+ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x half> @ld4.nxv32f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
+; CHECK-LABEL: ld4.nxv32f16:
+; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, #28, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 28
+%base_ptr = bitcast <vscale x 8 x half>* %base to half *
+%res = call <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%base_ptr)
+ret <vscale x 32 x half> %res
+}
+
+define <vscale x 32 x bfloat> @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
+; CHECK-LABEL: ld4.nxv32bf16:
+; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, #-32, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -32
+%base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
+%res = call <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%base_ptr)
+ret <vscale x 32 x bfloat> %res
+}
+
+; ld4w
+define <vscale x 16 x i32> @ld4.nxv16i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
+; CHECK-LABEL: ld4.nxv16i32:
+; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, #28, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 28
+%base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
+%res = call <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%base_ptr)
+ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x float> @ld4.nxv16f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
+; CHECK-LABEL: ld4.nxv16f32:
+; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, #-32, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -32
+%base_ptr = bitcast <vscale x 4 x float>* %base to float *
+%res = call <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%base_ptr)
+ret <vscale x 16 x float> %res
+}
+
+; ld4d
+define <vscale x 8 x i64> @ld4.nxv8i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
+; CHECK-LABEL: ld4.nxv8i64:
+; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, #28, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 28
+%base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
+%res = call <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%base_ptr)
+ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 8 x double> @ld4.nxv8f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
+; CHECK-LABEL: ld4.nxv8f64:
+; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, #-32, mul vl]
+; CHECK-NEXT: ret
+%base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -32
+%base_ptr = bitcast <vscale x 2 x double>* %base to double *
+%res = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double * %base_ptr)
+ret <vscale x 8 x double> %res
+}
+
+declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
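
The out-of-bounds cases above fall back to a reg+reg access whose offset
register holds a whole number of vector lengths. A hedged model of the two
materialization strategies visible in the FileCheck lines, where VL is the
SVE register width in bytes, i.e. 16 * vscale (rdvl/materializeVLOffset
below are illustrative; the actual instruction sequences come from generic
selection, not from this patch):

#include <cstdint>

// rdvl Xd, #imm computes imm * VL for a signed 6-bit imm in [-32, 31].
static int64_t rdvl(int64_t Imm, int64_t VL) { return Imm * VL; }

static int64_t materializeVLOffset(int64_t NumVLs, int64_t VL) {
  if (NumVLs >= -32 && NumVLs <= 31)
    return rdvl(NumVLs, VL); // a single rdvl, as in the ld2/ld3 tests
  // Larger multiples, e.g. -36 and 32 in the ld4 tests, are formed as
  // (rdvl #1 >> 4) * (NumVLs * 16), i.e. the mov/lsr/mul sequences
  // that the FIXMEs above would like to see simplified.
  return (rdvl(1, VL) >> 4) * (NumVLs * 16);
}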

diff  --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll
new file mode 100644
index 000000000000..ab59c8413795
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-reg+reg-addr-mode.ll
@@ -0,0 +1,259 @@
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=sve < %s | FileCheck %s
+
+; ld2b
+define <vscale x 32 x i8> @ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv32i8:
+; CHECK: ld2b { z0.b, z1.b }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i8, i8 *  %addr, i64 %a
+%res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+ret <vscale x 32 x i8> %res
+}
+
+; ld2h
+define <vscale x 16 x i16> @ld2.nxv16i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv16i16:
+; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i16, i16 *  %addr, i64 %a
+%res = call <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 16 x half> @ld2.nxv16f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv16f16:
+; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr half, half *  %addr, i64 %a
+%res = call <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
+ret <vscale x 16 x half> %res
+}
+
+define <vscale x 16 x bfloat> @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+; CHECK-LABEL: ld2.nxv16bf16:
+; CHECK: ld2h { z0.h, z1.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
+%res = call <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+ret <vscale x 16 x bfloat> %res
+}
+
+; ld2w
+define <vscale x 8 x i32> @ld2.nxv8i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv8i32:
+; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i32, i32 *  %addr, i64 %a
+%res = call <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 8 x float> @ld2.nxv8f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv8f32:
+; CHECK: ld2w { z0.s, z1.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr float, float *  %addr, i64 %a
+%res = call <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
+ret <vscale x 8 x float> %res
+}
+
+; ld2d
+define <vscale x 4 x i64> @ld2.nxv4i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv4i64:
+; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i64, i64 *  %addr, i64 %a
+%res = call <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x double> @ld2.nxv4f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+; CHECK-LABEL: ld2.nxv4f64:
+; CHECK: ld2d { z0.d, z1.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr double, double *  %addr, i64 %a
+%res = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
+ret <vscale x 4 x double> %res
+}
+
+; ld3b
+define <vscale x 48 x i8> @ld3.nxv48i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv48i8:
+; CHECK: ld3b { z0.b, z1.b, z2.b }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i8, i8 *  %addr, i64 %a
+%res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+ret <vscale x 48 x i8> %res
+}
+
+; ld3h
+define <vscale x 24 x i16> @ld3.nxv24i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv24i16:
+; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i16, i16 *  %addr, i64 %a
+%res = call <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+ret <vscale x 24 x i16> %res
+}
+
+define <vscale x 24 x half> @ld3.nxv24f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv24f16:
+; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr half, half *  %addr, i64 %a
+%res = call <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
+ret <vscale x 24 x half> %res
+}
+
+define <vscale x 24 x bfloat> @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+; CHECK-LABEL: ld3.nxv24bf16:
+; CHECK: ld3h { z0.h, z1.h, z2.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
+%res = call <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+ret <vscale x 24 x bfloat> %res
+}
+
+; ld3w
+define <vscale x 12 x i32> @ld3.nxv12i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv12i32:
+; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i32, i32 *  %addr, i64 %a
+%res = call <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+ret <vscale x 12 x i32> %res
+}
+
+define <vscale x 12 x float> @ld3.nxv12f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv12f32:
+; CHECK: ld3w { z0.s, z1.s, z2.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr float, float *  %addr, i64 %a
+%res = call <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
+ret <vscale x 12 x float> %res
+}
+
+; ld3d
+define <vscale x 6 x i64> @ld3.nxv6i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv6i64:
+; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i64, i64 *  %addr, i64 %a
+%res = call <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+ret <vscale x 6 x i64> %res
+}
+
+define <vscale x 6 x double> @ld3.nxv6f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+; CHECK-LABEL: ld3.nxv6f64:
+; CHECK: ld3d { z0.d, z1.d, z2.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr double, double *  %addr, i64 %a
+%res = call <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
+ret <vscale x 6 x double> %res
+}
+
+; ld4b
+define <vscale x 64 x i8> @ld4.nxv64i8(<vscale x 16 x i1> %Pg, i8 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv64i8:
+; CHECK: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i8, i8 *  %addr, i64 %a
+%res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%addr2)
+ret <vscale x 64 x i8> %res
+}
+
+; ld4h
+define <vscale x 32 x i16> @ld4.nxv32i16(<vscale x 8 x i1> %Pg, i16 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv32i16:
+; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i16, i16 *  %addr, i64 %a
+%res = call <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1> %Pg, i16 *%addr2)
+ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x half> @ld4.nxv32f16(<vscale x 8 x i1> %Pg, half *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv32f16:
+; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr half, half *  %addr, i64 %a
+%res = call <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1> %Pg, half *%addr2)
+ret <vscale x 32 x half> %res
+}
+
+define <vscale x 32 x bfloat> @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, bfloat *%addr, i64 %a) #0 {
+; CHECK-LABEL: ld4.nxv32bf16:
+; CHECK: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+%addr2 = getelementptr bfloat, bfloat *  %addr, i64 %a
+%res = call <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1> %Pg, bfloat *%addr2)
+ret <vscale x 32 x bfloat> %res
+}
+
+; ld4w
+define <vscale x 16 x i32> @ld4.nxv16i32(<vscale x 4 x i1> %Pg, i32 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv16i32:
+; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i32, i32 *  %addr, i64 %a
+%res = call <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1> %Pg, i32 *%addr2)
+ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x float> @ld4.nxv16f32(<vscale x 4 x i1> %Pg, float *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv16f32:
+; CHECK: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+%addr2 = getelementptr float, float *  %addr, i64 %a
+%res = call <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1> %Pg, float *%addr2)
+ret <vscale x 16 x float> %res
+}
+
+; ld4d
+define <vscale x 8 x i64> @ld4.nxv8i64(<vscale x 2 x i1> %Pg, i64 *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv8i64:
+; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr i64, i64 *  %addr, i64 %a
+%res = call <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1> %Pg, i64 *%addr2)
+ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 8 x double> @ld4.nxv8f64(<vscale x 2 x i1> %Pg, double *%addr, i64 %a) {
+; CHECK-LABEL: ld4.nxv8f64:
+; CHECK: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+%addr2 = getelementptr double, double *  %addr, i64 %a
+%res = call <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1> %Pg, double *%addr2)
+ret <vscale x 8 x double> %res
+}
+
+declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 16 x i16> @llvm.aarch64.sve.ld2.nxv16i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 8 x i32> @llvm.aarch64.sve.ld2.nxv8i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 4 x i64> @llvm.aarch64.sve.ld2.nxv4i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 16 x half> @llvm.aarch64.sve.ld2.nxv16f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare <vscale x 16 x bfloat> @llvm.aarch64.sve.ld2.nxv16bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 8 x float> @llvm.aarch64.sve.ld2.nxv8f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 24 x i16> @llvm.aarch64.sve.ld3.nxv24i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 12 x i32> @llvm.aarch64.sve.ld3.nxv12i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 6 x i64> @llvm.aarch64.sve.ld3.nxv6i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 24 x half> @llvm.aarch64.sve.ld3.nxv24f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare <vscale x 24 x bfloat> @llvm.aarch64.sve.ld3.nxv24bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 12 x float> @llvm.aarch64.sve.ld3.nxv12f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare <vscale x 6 x double> @llvm.aarch64.sve.ld3.nxv6f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+declare <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 32 x i16> @llvm.aarch64.sve.ld4.nxv32i16.nxv8i1.p0i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 16 x i32> @llvm.aarch64.sve.ld4.nxv16i32.nxv4i1.p0i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 8 x i64> @llvm.aarch64.sve.ld4.nxv8i64.nxv2i1.p0i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 32 x half> @llvm.aarch64.sve.ld4.nxv32f16.nxv8i1.p0f16(<vscale x 8 x i1>, half*)
+declare <vscale x 32 x bfloat> @llvm.aarch64.sve.ld4.nxv32bf16.nxv8i1.p0bf16(<vscale x 8 x i1>, bfloat*)
+declare <vscale x 16 x float> @llvm.aarch64.sve.ld4.nxv16f32.nxv4i1.p0f32(<vscale x 4 x i1>, float*)
+declare <vscale x 8 x double> @llvm.aarch64.sve.ld4.nxv8f64.nxv2i1.p0f64(<vscale x 2 x i1>, double*)
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
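
The reg+reg patterns above also encode the new Scale argument that Select
passes to SelectPredicatedLoad: it is log2 of the element size in bytes and
becomes the lsl amount applied to the index register. A small illustrative
mapping (the function name is hypothetical):

#include <cassert>

static unsigned regRegShiftForElementBytes(unsigned ElementBytes) {
  switch (ElementBytes) {
  case 1: return 0; // ld<N>b: [x0, x1]
  case 2: return 1; // ld<N>h: [x0, x1, lsl #1]
  case 4: return 2; // ld<N>w: [x0, x1, lsl #2]
  case 8: return 3; // ld<N>d: [x0, x1, lsl #3]
  }
  assert(false && "unexpected SVE element size");
  return 0;
}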


        

