[llvm] df8e0ce - [SVE] Extend getMemVTFromNode to cover the sret variants of sve.ld2/3/4.

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 8 10:42:25 PST 2022


Author: Paul Walker
Date: 2022-11-08T18:41:15Z
New Revision: df8e0ce59248822029aa8deafb8e6ff5a5eb5424

URL: https://github.com/llvm/llvm-project/commit/df8e0ce59248822029aa8deafb8e6ff5a5eb5424
DIFF: https://github.com/llvm/llvm-project/commit/df8e0ce59248822029aa8deafb8e6ff5a5eb5424.diff

LOG: [SVE] Extend getMemVTFromNode to cover the sret variants of sve.ld2/3/4.

This enables the use of reg+imm addressing modes for these intrinsics,
matching the support already in place for their non-sret variants.

Differential Revision: https://reviews.llvm.org/D132392
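
As a before/after illustration (taken from the updated tests below), the
VL-scaled offset previously had to be materialised into a register and can
now be folded into the load's immediate form:

    before:  rdvl x8, #2
             ld2b { z0.b, z1.b }, p0/z, [x0, x8]
    after:   ld2b { z0.b, z1.b }, p0/z, [x0, #2, mul vl]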

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
    llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index de44144dc25bc..42aad756fcd28 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -5428,22 +5428,30 @@ static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
     break;
   }
 
-  if (Opcode != ISD::INTRINSIC_VOID)
+  if (Opcode != ISD::INTRINSIC_VOID && Opcode != ISD::INTRINSIC_W_CHAIN)
     return EVT();
 
-  const unsigned IntNo =
-      cast<ConstantSDNode>(Root->getOperand(1))->getZExtValue();
-  if (IntNo == Intrinsic::aarch64_sme_ldr ||
-      IntNo == Intrinsic::aarch64_sme_str)
-    return MVT::nxv16i8;
-
-  if (IntNo != Intrinsic::aarch64_sve_prf)
+  switch (cast<ConstantSDNode>(Root->getOperand(1))->getZExtValue()) {
+  default:
     return EVT();
-
-  // We are using an SVE prefetch intrinsic. Type must be inferred
-  // from the width of the predicate.
-  return getPackedVectorTypeFromPredicateType(
-      Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/1);
+  case Intrinsic::aarch64_sme_ldr:
+  case Intrinsic::aarch64_sme_str:
+    return MVT::nxv16i8;
+  case Intrinsic::aarch64_sve_prf:
+    // We are using an SVE prefetch intrinsic. Type must be inferred from the
+    // width of the predicate.
+    return getPackedVectorTypeFromPredicateType(
+        Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/1);
+  case Intrinsic::aarch64_sve_ld2_sret:
+    return getPackedVectorTypeFromPredicateType(
+        Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/2);
+  case Intrinsic::aarch64_sve_ld3_sret:
+    return getPackedVectorTypeFromPredicateType(
+        Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/3);
+  case Intrinsic::aarch64_sve_ld4_sret:
+    return getPackedVectorTypeFromPredicateType(
+        Ctx, Root->getOperand(2)->getValueType(0), /*NumVec=*/4);
+  }
 }
 
 /// SelectAddrModeIndexedSVE - Attempt selection of the addressing mode:
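
For context, getPackedVectorTypeFromPredicateType derives the memory VT from
the type of the predicate operand. A minimal sketch of the mapping it performs
(an approximation for illustration, not the exact helper in this file):

  // Sketch only: assumes the usual SVE predicate-to-packed-type mapping
  // (nxv16i1 -> i8, nxv8i1 -> i16, nxv4i1 -> i32, nxv2i1 -> i64), with the
  // element count scaled by NumVec, e.g. ld2 with an nxv16i1 predicate
  // yields nxv32i8, matching test names like ld2.nxv32i8 below.
  static EVT getPackedVTSketch(LLVMContext &Ctx, EVT PredVT, unsigned NumVec) {
    unsigned Lanes = PredVT.getVectorMinNumElements(); // 16, 8, 4 or 2
    MVT EltVT;
    switch (Lanes) {
    default: return EVT();       // not a legal SVE predicate type
    case 16: EltVT = MVT::i8;  break;
    case 8:  EltVT = MVT::i16; break;
    case 4:  EltVT = MVT::i32; break;
    case 2:  EltVT = MVT::i64; break;
    }
    return EVT::getVectorVT(Ctx, EltVT,
                            ElementCount::getScalable(Lanes * NumVec));
  }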

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
index 9aac5d35d8b25..721ba3acde260 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll
@@ -12,8 +12,7 @@
 define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
 ; CHECK-LABEL: ld2.nxv32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #2
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 2
   %base_ptr = bitcast <vscale x 16 x i8>* %base to i8*
@@ -24,8 +23,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8(<vscale x 16 x i1
 define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
 ; CHECK-LABEL: ld2.nxv32i8_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-16
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
   %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
@@ -36,8 +34,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_lower_bound(<vsca
 define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
 ; CHECK-LABEL: ld2.nxv32i8_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #14
-; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
   %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
@@ -85,8 +82,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld2.nxv32i8_outside_upper_bou
 define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16>* %addr) {
 ; CHECK-LABEL: ld2.nxv16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #14
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x8]
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 14
   %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
@@ -97,8 +93,7 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld2.nxv16i16(<vscale x 8 x i1
 define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half>* %addr) {
 ; CHECK-LABEL: ld2.nxv16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #-16
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x8]
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 -16
   %base_ptr = bitcast <vscale x 8 x half>* %base to half *
@@ -109,8 +104,7 @@ define { <vscale x 8 x half>, <vscale x 8 x half> } @ld2.nxv16f16(<vscale x 8 x
 define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat>* %addr) #0 {
 ; CHECK-LABEL: ld2.nxv16bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #12
-; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x8]
+; CHECK-NEXT:    ld2h { z0.h, z1.h }, p0/z, [x0, #12, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 12
   %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
@@ -122,8 +116,7 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld2.nxv16bf16(<vscale x
 define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32>* %addr) {
 ; CHECK-LABEL: ld2.nxv8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #14
-; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x8]
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 14
   %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
@@ -134,8 +127,7 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld2.nxv8i32(<vscale x 4 x i1>
 define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
 ; CHECK-LABEL: ld2.nxv8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #-16
-; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x8]
+; CHECK-NEXT:    ld2w { z0.s, z1.s }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -16
   %base_ptr = bitcast <vscale x 4 x float>* %base to float *
@@ -147,8 +139,7 @@ define { <vscale x 4 x float>, <vscale x 4 x float> } @ld2.nxv8f32(<vscale x 4 x
 define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64>* %addr) {
 ; CHECK-LABEL: ld2.nxv4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #14
-; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x8]
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, #14, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 14
   %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
@@ -159,8 +150,7 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld2.nxv4i64(<vscale x 2 x i1>
 define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double>* %addr) {
 ; CHECK-LABEL: ld2.nxv4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #-16
-; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x8]
+; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x0, #-16, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -16
   %base_ptr = bitcast <vscale x 2 x double>* %base to double *
@@ -172,8 +162,7 @@ define { <vscale x 2 x double>, <vscale x 2 x double> } @ld2.nxv4f64(<vscale x 2
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
 ; CHECK-LABEL: ld3.nxv48i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #3
-; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, #3, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
   %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
@@ -184,8 +173,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
 ; CHECK-LABEL: ld3.nxv48i8_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-24
-; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
   %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
@@ -196,8 +184,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
 ; CHECK-LABEL: ld3.nxv48i8_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #21
-; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ld3b { z0.b - z2.b }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
   %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
@@ -257,8 +244,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld3.nxv48
 define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
 ; CHECK-LABEL: ld3.nxv24i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #21
-; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x8]
+; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 21
   %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
@@ -269,8 +255,7 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld3.nxv24
 define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nxv24f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
 ; CHECK-LABEL: ld3.nxv24f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #21
-; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x8]
+; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 21
   %base_ptr = bitcast <vscale x 8 x half>* %base to half *
@@ -281,8 +266,7 @@ define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld3.nx
 define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld3.nxv24bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
 ; CHECK-LABEL: ld3.nxv24bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #-24
-; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x8]
+; CHECK-NEXT:    ld3h { z0.h - z2.h }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -24
   %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
@@ -294,8 +278,7 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @
 define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
 ; CHECK-LABEL: ld3.nxv12i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #21
-; CHECK-NEXT:    ld3w { z0.s - z2.s }, p0/z, [x8]
+; CHECK-NEXT:    ld3w { z0.s - z2.s }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 21
   %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
@@ -306,8 +289,7 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld3.nxv12
 define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3.nxv12f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float> *%addr) {
 ; CHECK-LABEL: ld3.nxv12f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #-24
-; CHECK-NEXT:    ld3w { z0.s - z2.s }, p0/z, [x8]
+; CHECK-NEXT:    ld3w { z0.s - z2.s }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -24
   %base_ptr = bitcast <vscale x 4 x float>* %base to float *
@@ -319,8 +301,7 @@ define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld3
 define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
 ; CHECK-LABEL: ld3.nxv6i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #21
-; CHECK-NEXT:    ld3d { z0.d - z2.d }, p0/z, [x8]
+; CHECK-NEXT:    ld3d { z0.d - z2.d }, p0/z, [x0, #21, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 21
   %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
@@ -331,8 +312,7 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld3.nxv6i
 define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld3.nxv6f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
 ; CHECK-LABEL: ld3.nxv6f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #-24
-; CHECK-NEXT:    ld3d { z0.d - z2.d }, p0/z, [x8]
+; CHECK-NEXT:    ld3d { z0.d - z2.d }, p0/z, [x0, #-24, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -24
   %base_ptr = bitcast <vscale x 2 x double>* %base to double *
@@ -344,8 +324,7 @@ define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
 ; CHECK-LABEL: ld4.nxv64i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #4
-; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, #4, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
   %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
@@ -356,8 +335,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
 ; CHECK-LABEL: ld4.nxv64i8_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #-32
-; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
   %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
@@ -368,8 +346,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld4.nxv64i8_upper_bound(<vscale x 16 x i1> %Pg, <vscale x 16 x i8> *%addr) {
 ; CHECK-LABEL: ld4.nxv64i8_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdvl x8, #28
-; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, x8]
+; CHECK-NEXT:    ld4b { z0.b - z3.b }, p0/z, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
   %base_ptr = bitcast <vscale x 16 x i8>* %base to i8 *
@@ -455,8 +432,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
 define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld4.nxv32i16(<vscale x 8 x i1> %Pg, <vscale x 8 x i16> *%addr) {
 ; CHECK-LABEL: ld4.nxv32i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #8
-; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x8]
+; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x0, #8, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 8
   %base_ptr = bitcast <vscale x 8 x i16>* %base to i16 *
@@ -467,8 +443,7 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8
 define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld4.nxv32f16(<vscale x 8 x i1> %Pg, <vscale x 8 x half> *%addr) {
 ; CHECK-LABEL: ld4.nxv32f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #28
-; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x8]
+; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 28
   %base_ptr = bitcast <vscale x 8 x half>* %base to half *
@@ -479,8 +454,7 @@ define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale
 define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld4.nxv32bf16(<vscale x 8 x i1> %Pg, <vscale x 8 x bfloat> *%addr) #0 {
 ; CHECK-LABEL: ld4.nxv32bf16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #-32
-; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x8]
+; CHECK-NEXT:    ld4h { z0.h - z3.h }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* %addr, i64 -32
   %base_ptr = bitcast <vscale x 8 x bfloat>* %base to bfloat *
@@ -492,8 +466,7 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <v
 define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld4.nxv16i32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> *%addr) {
 ; CHECK-LABEL: ld4.nxv16i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #28
-; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x8]
+; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 28
   %base_ptr = bitcast <vscale x 4 x i32>* %base to i32 *
@@ -504,8 +477,7 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4
 define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld4.nxv16f32(<vscale x 4 x i1> %Pg, <vscale x 4 x float>* %addr) {
 ; CHECK-LABEL: ld4.nxv16f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #-32
-; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x8]
+; CHECK-NEXT:    ld4w { z0.s - z3.s }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 -32
   %base_ptr = bitcast <vscale x 4 x float>* %base to float *
@@ -517,8 +489,7 @@ define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vsca
 define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld4.nxv8i64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> *%addr) {
 ; CHECK-LABEL: ld4.nxv8i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #28
-; CHECK-NEXT:    ld4d { z0.d - z3.d }, p0/z, [x8]
+; CHECK-NEXT:    ld4d { z0.d - z3.d }, p0/z, [x0, #28, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 28
   %base_ptr = bitcast <vscale x 2 x i64>* %base to i64 *
@@ -529,8 +500,7 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2
 define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld4.nxv8f64(<vscale x 2 x i1> %Pg, <vscale x 2 x double> *%addr) {
 ; CHECK-LABEL: ld4.nxv8f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    addvl x8, x0, #-32
-; CHECK-NEXT:    ld4d { z0.d - z3.d }, p0/z, [x8]
+; CHECK-NEXT:    ld4d { z0.d - z3.d }, p0/z, [x0, #-32, mul vl]
 ; CHECK-NEXT:    ret
   %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -32
   %base_ptr = bitcast <vscale x 2 x double>* %base to double *
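
The lower/upper bound tests above track the encodable immediate range of the
reg+imm form: for ldN the offset is a multiple of N in the range [-8*N, 7*N]
vector lengths (hence -16..14 for ld2, -24..21 for ld3 and -32..28 for ld4),
and the *_outside_*_bound tests continue to use the register form. The test
can be exercised with something like the following (an assumed invocation;
the file's actual RUN line is authoritative):

    llc -mtriple=aarch64-linux-gnu -mattr=+sve,+bf16 \
      < llvm/test/CodeGen/AArch64/sve-intrinsics-ldN-sret-reg+imm-addr-mode.ll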

