[llvm] [RISCV][GISel] Support nxv16p0 for RV32. (PR #101573)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 8 16:45:31 PDT 2024


https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/101573

>From 234af4942b50ed35712f4d03f169716811bb18f5 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 1 Aug 2024 15:23:53 -0700
Subject: [PATCH 1/2] [RISCV][GISel] Support nxv16p0 for RV32.

Pointers are 32 bits on RV32 so nxv1p0 is lmul=mf2 and nxv16p0 is
lmul=m8.

Split the test so we can have different alignments and register class
sizes for rv32 and rv64 for the pointer tests.
---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |   7 +-
 .../legalizer/rvv/legalize-load-rv32.mir      | 103 ++++++++++++++++++
 .../legalizer/rvv/legalize-load-rv64.mir      |  79 ++++++++++++++
 .../legalizer/rvv/legalize-load.mir           |  72 ------------
 .../legalizer/rvv/legalize-store-rv32.mir     | 102 +++++++++++++++++
 .../legalizer/rvv/legalize-store-rv64.mir     |  79 ++++++++++++++
 .../legalizer/rvv/legalize-store.mir          |  72 ------------
 7 files changed, 368 insertions(+), 146 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv64.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv64.mir

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 4e583d96335d9..8c6c57aebff19 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -74,7 +74,9 @@ static LegalityPredicate typeIsLegalPtrVec(unsigned TypeIdx,
   LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
     return ST.hasVInstructions() &&
            (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
-            ST.getELen() == 64);
+            ST.getELen() == 64) &&
+           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 16 ||
+            Query.Types[TypeIdx].getScalarSizeInBits() == 32);
   };
   return all(typeInSet(TypeIdx, PtrVecTys), P);
 }
@@ -127,6 +129,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
   const LLT nxv2p0 = LLT::scalable_vector(2, p0);
   const LLT nxv4p0 = LLT::scalable_vector(4, p0);
   const LLT nxv8p0 = LLT::scalable_vector(8, p0);
+  const LLT nxv16p0 = LLT::scalable_vector(16, p0);
 
   using namespace TargetOpcode;
 
@@ -137,7 +140,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
                         nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                         nxv1s64,  nxv2s64, nxv4s64, nxv8s64};
 
-  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0};
+  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0, nxv16p0};
 
   getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
       .legalFor({s32, sXLen})
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv32.mir
new file mode 100644
index 0000000000000..0c0df2b1c246a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv32.mir
@@ -0,0 +1,103 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+  define <vscale x 1 x ptr> @vload_nxv1ptr(ptr %pa) #0 {
+    %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+    ret <vscale x 1 x ptr> %va
+  }
+
+  define <vscale x 2 x ptr> @vload_nxv2ptr(ptr %pa) #0 {
+    %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+    ret <vscale x 2 x ptr> %va
+  }
+
+  define <vscale x 8 x ptr> @vload_nxv8ptr(ptr %pa) #0 {
+    %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+    ret <vscale x 8 x ptr> %va
+  }
+
+  define <vscale x 16 x ptr> @vload_nxv16ptr(ptr %pa) #0 {
+    %va = load <vscale x 16 x ptr>, ptr %pa, align 64
+    ret <vscale x 16 x ptr> %va
+  }
+
+  attributes #0 = { "target-features"="+v" }
+
+...
+---
+name:            vload_nxv1ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv1ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa, align 4)
+    $v8 = COPY %1(<vscale x 1 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa, align 8)
+    $v8 = COPY %1(<vscale x 2 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv8ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv8ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa, align 32)
+    $v8m4 = COPY %1(<vscale x 8 x p0>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nxv16ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv16ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 16 x p0>) from %ir.pa, align 64)
+    $v8m8 = COPY %1(<vscale x 16 x p0>)
+    PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv64.mir
new file mode 100644
index 0000000000000..f7b4bbf226f3c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv64.mir
@@ -0,0 +1,79 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+  define <vscale x 1 x ptr> @vload_nxv1ptr(ptr %pa) #0 {
+    %va = load <vscale x 1 x ptr>, ptr %pa, align 8
+    ret <vscale x 1 x ptr> %va
+  }
+
+  define <vscale x 2 x ptr> @vload_nxv2ptr(ptr %pa) #0 {
+    %va = load <vscale x 2 x ptr>, ptr %pa, align 16
+    ret <vscale x 2 x ptr> %va
+  }
+
+  define <vscale x 8 x ptr> @vload_nxv8ptr(ptr %pa) #0 {
+    %va = load <vscale x 8 x ptr>, ptr %pa, align 64
+    ret <vscale x 8 x ptr> %va
+  }
+
+  attributes #0 = { "target-features"="+v" }
+
+...
+---
+name:            vload_nxv1ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv1ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa, align 8)
+    $v8 = COPY %1(<vscale x 1 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa, align 16)
+    $v8 = COPY %1(<vscale x 2 x p0>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv8ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv8ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa, align 64)
+    $v8m4 = COPY %1(<vscale x 8 x p0>)
+    PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
index 12f218863e400..2023bf7c05565 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -203,21 +203,6 @@
     ret <vscale x 2 x i64> %va
   }
 
-  define <vscale x 1 x ptr> @vload_nxv1ptr(ptr %pa) #0 {
-    %va = load <vscale x 1 x ptr>, ptr %pa, align 4
-    ret <vscale x 1 x ptr> %va
-  }
-
-  define <vscale x 2 x ptr> @vload_nxv2ptr(ptr %pa) #0 {
-    %va = load <vscale x 2 x ptr>, ptr %pa, align 8
-    ret <vscale x 2 x ptr> %va
-  }
-
-  define <vscale x 8 x ptr> @vload_nxv8ptr(ptr %pa) #0 {
-    %va = load <vscale x 8 x ptr>, ptr %pa, align 32
-    ret <vscale x 8 x ptr> %va
-  }
-
   attributes #0 = { "target-features"="+v" }
 
 ...
@@ -984,60 +969,3 @@ body:             |
     PseudoRET implicit $v8m2
 
 ...
----
-name:            vload_nxv1ptr
-body:             |
-  bb.1 (%ir-block.0):
-    liveins: $x10
-
-    ; CHECK-LABEL: name: vload_nxv1ptr
-    ; CHECK: liveins: $x10
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
-    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
-    ; CHECK-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
-    $v8 = COPY %1(<vscale x 1 x p0>)
-    PseudoRET implicit $v8
-
-...
----
-name:            vload_nxv2ptr
-body:             |
-  bb.1 (%ir-block.0):
-    liveins: $x10
-
-    ; CHECK-LABEL: name: vload_nxv2ptr
-    ; CHECK: liveins: $x10
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
-    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
-    ; CHECK-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
-    $v8 = COPY %1(<vscale x 2 x p0>)
-    PseudoRET implicit $v8
-
-...
----
-name:            vload_nxv8ptr
-body:             |
-  bb.1 (%ir-block.0):
-    liveins: $x10
-
-    ; CHECK-LABEL: name: vload_nxv8ptr
-    ; CHECK: liveins: $x10
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
-    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
-    ; CHECK-NEXT: PseudoRET implicit $v8m4
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
-    $v8m4 = COPY %1(<vscale x 8 x p0>)
-    PseudoRET implicit $v8m4
-
-...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv32.mir
new file mode 100644
index 0000000000000..ae4c6b879bdda
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv32.mir
@@ -0,0 +1,102 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+  define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
+    store <vscale x 1 x ptr> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
+    store <vscale x 2 x ptr> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
+    store <vscale x 8 x ptr> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx16ptr(ptr %pa, <vscale x 16 x ptr> %b) #0 {
+    store <vscale x 16 x ptr> %b, ptr %pa, align 4
+    ret void
+  }
+
+  attributes #0 = { "target-features"="+v" }
+
+---
+name:            vstore_nx1ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx1ptr
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x p0>) = COPY $v8
+    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8
+
+    ; CHECK-LABEL: name: vstore_nx2ptr
+    ; CHECK: liveins: $x10, $v8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x p0>) = COPY $v8
+    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa, align 8)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; CHECK-LABEL: name: vstore_nx8ptr
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x p0>) = COPY $v8m4
+    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa, align 32)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+
+    ; CHECK-LABEL: name: vstore_nx16ptr
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x p0>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x p0>), [[COPY]](p0) :: (store (<vscale x 16 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x p0>) = COPY $v8m8
+    G_STORE %1(<vscale x 16 x p0>), %0(p0) :: (store (<vscale x 16 x p0>) into %ir.pa, align 64)
+    PseudoRET
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv64.mir
new file mode 100644
index 0000000000000..9e45fb25b8428
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv64.mir
@@ -0,0 +1,79 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+  define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
+    store <vscale x 1 x ptr> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
+    store <vscale x 2 x ptr> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
+    store <vscale x 8 x ptr> %b, ptr %pa, align 32
+    ret void
+  }
+
+  attributes #0 = { "target-features"="+v" }
+
+...
+---
+name:            vstore_nx1ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx1ptr
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x p0>) = COPY $v8
+    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa, align 8)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx2ptr
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x p0>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa, align 16)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+
+    ; CHECK-LABEL: name: vstore_nx8ptr
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x p0>) = COPY $v8m8
+    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa, align 64)
+    PseudoRET
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
index b91d25509646f..4bb4eb5fa0c72 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -203,21 +203,6 @@
     ret void
   }
 
-  define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
-    store <vscale x 1 x ptr> %b, ptr %pa, align 4
-    ret void
-  }
-
-  define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
-    store <vscale x 2 x ptr> %b, ptr %pa, align 8
-    ret void
-  }
-
-  define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
-    store <vscale x 8 x ptr> %b, ptr %pa, align 32
-    ret void
-  }
-
   attributes #0 = { "target-features"="+v" }
 
 ...
@@ -984,60 +969,3 @@ body:             |
     PseudoRET
 
 ...
----
-name:            vstore_nx1ptr
-body:             |
-  bb.1 (%ir-block.0):
-    liveins: $v8, $x10
-
-    ; CHECK-LABEL: name: vstore_nx1ptr
-    ; CHECK: liveins: $v8, $x10
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
-    ; CHECK-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x p0>) = COPY $v8
-    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
-    PseudoRET
-
-...
----
-name:            vstore_nx2ptr
-body:             |
-  bb.1 (%ir-block.0):
-    liveins: $x10, $v8m2
-
-    ; CHECK-LABEL: name: vstore_nx2ptr
-    ; CHECK: liveins: $x10, $v8m2
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8m2
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
-    ; CHECK-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x p0>) = COPY $v8m2
-    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
-    PseudoRET
-
-...
----
-name:            vstore_nx8ptr
-body:             |
-  bb.1 (%ir-block.0):
-    liveins: $x10, $v8m8
-
-    ; CHECK-LABEL: name: vstore_nx8ptr
-    ; CHECK: liveins: $x10, $v8m8
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m8
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
-    ; CHECK-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x p0>) = COPY $v8m8
-    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
-    PseudoRET
-
-...

>From d1c374f6cae1019f90f3d59332429b0cfc91a228 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 8 Aug 2024 16:39:19 -0700
Subject: [PATCH 2/2] fixup! remove IR from new tests.

---
 .../legalizer/rvv/legalize-load-rv32.mir      | 48 +++++--------------
 .../legalizer/rvv/legalize-load-rv64.mir      | 37 ++++----------
 .../legalizer/rvv/legalize-store-rv32.mir     | 47 +++++-------------
 .../legalizer/rvv/legalize-store-rv64.mir     | 37 ++++----------
 4 files changed, 42 insertions(+), 127 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv32.mir
index 0c0df2b1c246a..d8fe9b77de432 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv32.mir
@@ -1,45 +1,21 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---- |
 
-  define <vscale x 1 x ptr> @vload_nxv1ptr(ptr %pa) #0 {
-    %va = load <vscale x 1 x ptr>, ptr %pa, align 4
-    ret <vscale x 1 x ptr> %va
-  }
-
-  define <vscale x 2 x ptr> @vload_nxv2ptr(ptr %pa) #0 {
-    %va = load <vscale x 2 x ptr>, ptr %pa, align 8
-    ret <vscale x 2 x ptr> %va
-  }
-
-  define <vscale x 8 x ptr> @vload_nxv8ptr(ptr %pa) #0 {
-    %va = load <vscale x 8 x ptr>, ptr %pa, align 32
-    ret <vscale x 8 x ptr> %va
-  }
-
-  define <vscale x 16 x ptr> @vload_nxv16ptr(ptr %pa) #0 {
-    %va = load <vscale x 16 x ptr>, ptr %pa, align 64
-    ret <vscale x 16 x ptr> %va
-  }
-
-  attributes #0 = { "target-features"="+v" }
-
-...
 ---
 name:            vload_nxv1ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10
 
     ; CHECK-LABEL: name: vload_nxv1ptr
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>))
     ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa, align 4)
+    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>), align 4)
     $v8 = COPY %1(<vscale x 1 x p0>)
     PseudoRET implicit $v8
 
@@ -47,18 +23,18 @@ body:             |
 ---
 name:            vload_nxv2ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10
 
     ; CHECK-LABEL: name: vload_nxv2ptr
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>))
     ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa, align 8)
+    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>), align 8)
     $v8 = COPY %1(<vscale x 2 x p0>)
     PseudoRET implicit $v8
 
@@ -66,18 +42,18 @@ body:             |
 ---
 name:            vload_nxv8ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10
 
     ; CHECK-LABEL: name: vload_nxv8ptr
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>))
     ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
     ; CHECK-NEXT: PseudoRET implicit $v8m4
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa, align 32)
+    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>), align 32)
     $v8m4 = COPY %1(<vscale x 8 x p0>)
     PseudoRET implicit $v8m4
 
@@ -85,18 +61,18 @@ body:             |
 ---
 name:            vload_nxv16ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10
 
     ; CHECK-LABEL: name: vload_nxv16ptr
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x p0>) from %ir.pa)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x p0>))
     ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x p0>)
     ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 16 x p0>) from %ir.pa, align 64)
+    %1:_(<vscale x 16 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 16 x p0>), align 64)
     $v8m8 = COPY %1(<vscale x 16 x p0>)
     PseudoRET implicit $v8m8
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv64.mir
index f7b4bbf226f3c..98dee70a42cdb 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load-rv64.mir
@@ -1,40 +1,21 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---- |
 
-  define <vscale x 1 x ptr> @vload_nxv1ptr(ptr %pa) #0 {
-    %va = load <vscale x 1 x ptr>, ptr %pa, align 8
-    ret <vscale x 1 x ptr> %va
-  }
-
-  define <vscale x 2 x ptr> @vload_nxv2ptr(ptr %pa) #0 {
-    %va = load <vscale x 2 x ptr>, ptr %pa, align 16
-    ret <vscale x 2 x ptr> %va
-  }
-
-  define <vscale x 8 x ptr> @vload_nxv8ptr(ptr %pa) #0 {
-    %va = load <vscale x 8 x ptr>, ptr %pa, align 64
-    ret <vscale x 8 x ptr> %va
-  }
-
-  attributes #0 = { "target-features"="+v" }
-
-...
 ---
 name:            vload_nxv1ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10
 
     ; CHECK-LABEL: name: vload_nxv1ptr
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>))
     ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
     ; CHECK-NEXT: PseudoRET implicit $v8
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa, align 8)
+    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>), align 8)
     $v8 = COPY %1(<vscale x 1 x p0>)
     PseudoRET implicit $v8
 
@@ -42,18 +23,18 @@ body:             |
 ---
 name:            vload_nxv2ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10
 
     ; CHECK-LABEL: name: vload_nxv2ptr
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>))
     ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
     ; CHECK-NEXT: PseudoRET implicit $v8m2
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa, align 16)
+    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>), align 16)
     $v8 = COPY %1(<vscale x 2 x p0>)
     PseudoRET implicit $v8m2
 
@@ -61,18 +42,18 @@ body:             |
 ---
 name:            vload_nxv8ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10
 
     ; CHECK-LABEL: name: vload_nxv8ptr
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
-    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>))
     ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
     ; CHECK-NEXT: PseudoRET implicit $v8m8
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa, align 64)
+    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>), align 64)
     $v8m4 = COPY %1(<vscale x 8 x p0>)
     PseudoRET implicit $v8m8
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv32.mir
index ae4c6b879bdda..a93cce6904c2c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv32.mir
@@ -1,33 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---- |
-
-  define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
-    store <vscale x 1 x ptr> %b, ptr %pa, align 4
-    ret void
-  }
-
-  define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
-    store <vscale x 2 x ptr> %b, ptr %pa, align 8
-    ret void
-  }
-
-  define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
-    store <vscale x 8 x ptr> %b, ptr %pa, align 32
-    ret void
-  }
-
-  define void @vstore_nx16ptr(ptr %pa, <vscale x 16 x ptr> %b) #0 {
-    store <vscale x 16 x ptr> %b, ptr %pa, align 4
-    ret void
-  }
-
-  attributes #0 = { "target-features"="+v" }
 
 ---
 name:            vstore_nx1ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $v8, $x10
 
     ; CHECK-LABEL: name: vstore_nx1ptr
@@ -35,18 +12,18 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>))
     ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x p0>) = COPY $v8
-    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa, align 4)
+    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>), align 4)
     PseudoRET
 
 ...
 ---
 name:            vstore_nx2ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10, $v8
 
     ; CHECK-LABEL: name: vstore_nx2ptr
@@ -54,18 +31,18 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>))
     ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x p0>) = COPY $v8
-    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa, align 8)
+    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>), align 8)
     PseudoRET
 
 ...
 ---
 name:            vstore_nx8ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10, $v8m4
 
     ; CHECK-LABEL: name: vstore_nx8ptr
@@ -73,18 +50,18 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m4
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>))
     ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x p0>) = COPY $v8m4
-    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa, align 32)
+    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>), align 32)
     PseudoRET
 
 ...
 ---
 name:            vstore_nx16ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10, $v8m8
 
     ; CHECK-LABEL: name: vstore_nx16ptr
@@ -92,11 +69,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x p0>) = COPY $v8m8
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa, align 64)
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>), align 64)
     ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 16 x p0>) = COPY $v8m8
-    G_STORE %1(<vscale x 16 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa, align 64)
+    G_STORE %1(<vscale x 16 x p0>), %0(p0) :: (store (<vscale x 8 x p0>), align 64)
     PseudoRET
 
 ...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv64.mir
index 9e45fb25b8428..3373ed4d28746 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store-rv64.mir
@@ -1,29 +1,10 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---- |
 
-  define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
-    store <vscale x 1 x ptr> %b, ptr %pa, align 4
-    ret void
-  }
-
-  define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
-    store <vscale x 2 x ptr> %b, ptr %pa, align 8
-    ret void
-  }
-
-  define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
-    store <vscale x 8 x ptr> %b, ptr %pa, align 32
-    ret void
-  }
-
-  attributes #0 = { "target-features"="+v" }
-
-...
 ---
 name:            vstore_nx1ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $v8, $x10
 
     ; CHECK-LABEL: name: vstore_nx1ptr
@@ -31,18 +12,18 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>))
     ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 1 x p0>) = COPY $v8
-    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa, align 8)
+    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>), align 8)
     PseudoRET
 
 ...
 ---
 name:            vstore_nx2ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10, $v8m2
 
     ; CHECK-LABEL: name: vstore_nx2ptr
@@ -50,18 +31,18 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8m2
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>))
     ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 2 x p0>) = COPY $v8m2
-    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa, align 16)
+    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>), align 16)
     PseudoRET
 
 ...
 ---
 name:            vstore_nx8ptr
 body:             |
-  bb.1 (%ir-block.0):
+  bb.1:
     liveins: $x10, $v8m8
 
     ; CHECK-LABEL: name: vstore_nx8ptr
@@ -69,11 +50,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m8
-    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>))
     ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
     %1:_(<vscale x 8 x p0>) = COPY $v8m8
-    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa, align 64)
+    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>), align 64)
     PseudoRET
 
 ...



More information about the llvm-commits mailing list