[llvm] [RISCV][GISEL] Generate VLMax using -1 constant (PR #110778)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 2 10:16:20 PDT 2024


https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/110778

>From c945843322b1e194cc3c8c9a3fc6c418b5c5dd18 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 1 Oct 2024 18:16:12 -0700
Subject: [PATCH 1/4] [RISCV][GISEL] Generate VLMax using RISCV::VLMaxSentinel

SelectionDAG represents VLMAX with an ISD::Register node for RISCV::X0.
The ComplexPattern VLOp then uses selectVLOp to convert RISCV::X0 into
RISCV::VLMaxSentinel during instruction selection.

The original legalization patch for G_SPLAT_VECTOR used the physical
register RISCV::X0 directly. $x0 has no LLT type, so GIComplexOperandMatcher
has no way to match it.

The approach we switch to here, building VLMAX as an ordinary constant,
will allow us to use GIComplexOperandMatcher to implement the
ComplexMatcherFn selectVLOp in GISel; a sketch of what that matcher could
look like follows this commit message.
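
To make the intended follow-up concrete, here is a rough sketch of the
kind of ComplexMatcherFn this change enables. It is illustrative only,
not code from this PR: the selectVLOp member, the MRI pointer, and the
rendering to RISCV::VLMaxSentinel are assumptions about the eventual
selector plumbing.

  // Assumed headers: llvm/CodeGen/GlobalISel/InstructionSelector.h and
  // llvm/CodeGen/GlobalISel/Utils.h (getIConstantVRegValWithLookThrough).
  InstructionSelector::ComplexRendererFns
  RISCVInstructionSelector::selectVLOp(MachineOperand &Root) const {
    assert(Root.isReg() && "expected the VL operand to be a vreg");
    // The legalizer now feeds VL through a plain G_CONSTANT, so the
    // matcher can look through copies to recover the immediate.
    std::optional<ValueAndVReg> C =
        getIConstantVRegValWithLookThrough(Root.getReg(), *MRI);
    if (!C)
      return std::nullopt;
    // An all-ones VL means "VLMAX"; render it as the sentinel immediate,
    // mirroring what SelectionDAG's selectVLOp produces.
    int64_t Imm = C->Value.isAllOnes() ? RISCV::VLMaxSentinel
                                       : C->Value.getZExtValue();
    return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Imm); }}};
  }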
---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 12 ++--
 .../rvv/legalize-splatvector-rv32.mir         | 70 +++++++++++--------
 .../rvv/legalize-splatvector-rv64.mir         | 70 +++++++++++--------
 3 files changed, 91 insertions(+), 61 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 66e9f752a1bbdf..412bf9b7bb3bc0 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -833,19 +833,21 @@ static MachineInstrBuilder buildAllOnesMask(LLT VecTy, const SrcOp &VL,
 
 /// Gets the two common "VL" operands: an all-ones mask and the vector length.
 /// VecTy is a scalable vector type.
-static std::pair<MachineInstrBuilder, Register>
+static std::pair<MachineInstrBuilder, MachineInstrBuilder>
 buildDefaultVLOps(const DstOp &Dst, MachineIRBuilder &MIB,
                   MachineRegisterInfo &MRI) {
   LLT VecTy = Dst.getLLTTy(MRI);
   assert(VecTy.isScalableVector() && "Expecting scalable container type");
-  Register VL(RISCV::X0);
-  MachineInstrBuilder Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
+  const RISCVSubtarget &STI = MIB.getMF().getSubtarget<RISCVSubtarget>();
+  LLT XLenTy(STI.getXLenVT());
+  auto VL = MIB.buildConstant(XLenTy, RISCV::VLMaxSentinel);
+  auto Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
   return {Mask, VL};
 }
 
 static MachineInstrBuilder
 buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
-                         Register Hi, Register VL, MachineIRBuilder &MIB,
+                         Register Hi, const SrcOp &VL, MachineIRBuilder &MIB,
                          MachineRegisterInfo &MRI) {
   // TODO: If the Hi bits of the splat are undefined, then it's fine to just
   // splat Lo even if it might be sign extended. I don't think we have
@@ -861,7 +863,7 @@ buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
 
 static MachineInstrBuilder
 buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
-                         const SrcOp &Scalar, Register VL,
+                         const SrcOp &Scalar, const SrcOp &VL,
                          MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
   assert(Scalar.getLLTTy(MRI) == LLT::scalar(64) && "Unexpected VecTy!");
   auto Unmerge = MIB.buildUnmerge(LLT::scalar(32), Scalar);
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir
index 18241413a3a609..5b14dcd0b69054 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir
@@ -8,8 +8,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv1i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -25,8 +26,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv1i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 1 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -69,8 +71,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv2i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -86,8 +89,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv2i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 2 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -130,8 +134,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv4i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMCLR_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -147,8 +152,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv4i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 4 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -191,8 +197,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv8i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMCLR_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -208,8 +215,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv8i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 8 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -252,8 +260,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv16i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMCLR_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -269,8 +278,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv16i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 16 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -313,8 +323,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv32i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMCLR_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -330,8 +341,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv32i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 32 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -374,8 +386,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv64i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMCLR_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -391,8 +404,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv64i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL [[C]](s32)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 64 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
index 5f484f37363921..c95db3262471d9 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
@@ -8,8 +8,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv1i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -25,8 +26,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv1i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 1 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -72,8 +74,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv2i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -89,8 +92,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv2i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 2 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -136,8 +140,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv4i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMCLR_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -153,8 +158,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv4i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 4 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -200,8 +206,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv8i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMCLR_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -217,8 +224,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv8i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 8 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -264,8 +272,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv16i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMCLR_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -281,8 +290,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv16i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 16 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -328,8 +338,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv32i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMCLR_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -345,8 +356,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv32i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 32 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1
@@ -392,8 +404,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv64i1_0
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMCLR_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 0
@@ -409,8 +422,9 @@ tracksRegLiveness: true
 body:             |
   bb.1:
     ; CHECK-LABEL: name: splatvector_nxv64i1_1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL [[C]](s64)
     ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 64 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v0
     %0:_(s1) = G_CONSTANT i1 1

>From 4a64ebe2081a8b30ee72e7bf920a025a6b050129 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 1 Oct 2024 18:43:26 -0700
Subject: [PATCH 2/4] fixup! update regbank tests

---
 .../regbankselect/rvv/vmclr-rv32.mir          | 125 +++++++++++++++
 .../regbankselect/rvv/vmclr-rv64.mir          | 125 +++++++++++++++
 .../GlobalISel/regbankselect/rvv/vmclr.mir    | 149 ------------------
 3 files changed, 250 insertions(+), 149 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr-rv64.mir
 delete mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr.mir

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr-rv32.mir
new file mode 100644
index 00000000000000..b2ad2dd777f6c3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr-rv32.mir
@@ -0,0 +1,125 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck %s
+
+---
+name:            splat_zero_nxv1i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_VMCLR_VL [[C]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s32) = G_CONSTANT i32 -1
+    %1:_(<vscale x 1 x s1>) = G_VMCLR_VL %0(s32)
+    $v0 = COPY %1(<vscale x 1 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv2i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_VMCLR_VL [[C]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s32) = G_CONSTANT i32 -1
+    %1:_(<vscale x 2 x s1>) = G_VMCLR_VL %0(s32)
+    $v0 = COPY %1(<vscale x 2 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv4i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_VMCLR_VL [[C]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s32) = G_CONSTANT i32 -1
+    %1:_(<vscale x 4 x s1>) = G_VMCLR_VL %0(s32)
+    $v0 = COPY %1(<vscale x 4 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv8i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_VMCLR_VL [[C]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s32) = G_CONSTANT i32 -1
+    %1:_(<vscale x 8 x s1>) = G_VMCLR_VL %0(s32)
+    $v0 = COPY %1(<vscale x 8 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv16i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_VMCLR_VL [[C]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s32) = G_CONSTANT i32 -1
+    %1:_(<vscale x 16 x s1>) = G_VMCLR_VL %0(s32)
+    $v0 = COPY %1(<vscale x 16 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv32i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_VMCLR_VL [[C]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s32) = G_CONSTANT i32 -1
+    %1:_(<vscale x 32 x s1>) = G_VMCLR_VL %0(s32)
+    $v0 = COPY %1(<vscale x 32 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv64i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv64i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_VMCLR_VL [[C]](s32)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s32) = G_CONSTANT i32 -1
+    %1:_(<vscale x 64 x s1>) = G_VMCLR_VL %0(s32)
+    $v0 = COPY %1(<vscale x 64 x s1>)
+    PseudoRET implicit $v0
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr-rv64.mir
new file mode 100644
index 00000000000000..9997805eb5f765
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr-rv64.mir
@@ -0,0 +1,125 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck %s
+
+---
+name:            splat_zero_nxv1i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_VMCLR_VL [[C]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s64) = G_CONSTANT i64 -1
+    %1:_(<vscale x 1 x s1>) = G_VMCLR_VL %0(s64)
+    $v0 = COPY %1(<vscale x 1 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv2i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_VMCLR_VL [[C]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s64) = G_CONSTANT i64 -1
+    %1:_(<vscale x 2 x s1>) = G_VMCLR_VL %0(s64)
+    $v0 = COPY %1(<vscale x 2 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv4i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_VMCLR_VL [[C]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s64) = G_CONSTANT i64 -1
+    %1:_(<vscale x 4 x s1>) = G_VMCLR_VL %0(s64)
+    $v0 = COPY %1(<vscale x 4 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv8i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_VMCLR_VL [[C]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s64) = G_CONSTANT i64 -1
+    %1:_(<vscale x 8 x s1>) = G_VMCLR_VL %0(s64)
+    $v0 = COPY %1(<vscale x 8 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv16i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_VMCLR_VL [[C]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s64) = G_CONSTANT i64 -1
+    %1:_(<vscale x 16 x s1>) = G_VMCLR_VL %0(s64)
+    $v0 = COPY %1(<vscale x 16 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv32i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_VMCLR_VL [[C]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s64) = G_CONSTANT i64 -1
+    %1:_(<vscale x 32 x s1>) = G_VMCLR_VL %0(s64)
+    $v0 = COPY %1(<vscale x 32 x s1>)
+    PseudoRET implicit $v0
+
+...
+---
+name:            splat_zero_nxv64i1
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv64i1
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_VMCLR_VL [[C]](s64)
+    ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
+    ; CHECK-NEXT: PseudoRET implicit $v0
+    %0:_(s64) = G_CONSTANT i64 -1
+    %1:_(<vscale x 64 x s1>) = G_VMCLR_VL %0(s64)
+    $v0 = COPY %1(<vscale x 64 x s1>)
+    PseudoRET implicit $v0
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr.mir
deleted file mode 100644
index f12818227119be..00000000000000
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/vmclr.mir
+++ /dev/null
@@ -1,149 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
-# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV32I %s
-# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
-# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
-# RUN:   -o - | FileCheck -check-prefix=RV64I %s
-
----
-name:            splat_zero_nxv1i1
-legalized:       true
-regBankSelected: false
-body:             |
-  bb.1:
-    ; RV32I-LABEL: name: splat_zero_nxv1i1
-    ; RV32I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_VMCLR_VL $x0
-    ; RV32I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v0
-    ;
-    ; RV64I-LABEL: name: splat_zero_nxv1i1
-    ; RV64I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_VMCLR_VL $x0
-    ; RV64I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v0
-    %0:_(<vscale x 1 x s1>) = G_VMCLR_VL $x0
-    $v0 = COPY %0(<vscale x 1 x s1>)
-    PseudoRET implicit $v0
-
-...
----
-name:            splat_zero_nxv2i1
-legalized:       true
-regBankSelected: false
-body:             |
-  bb.1:
-    ; RV32I-LABEL: name: splat_zero_nxv2i1
-    ; RV32I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_VMCLR_VL $x0
-    ; RV32I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v0
-    ;
-    ; RV64I-LABEL: name: splat_zero_nxv2i1
-    ; RV64I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_VMCLR_VL $x0
-    ; RV64I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v0
-    %0:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
-    $v0 = COPY %0(<vscale x 2 x s1>)
-    PseudoRET implicit $v0
-
-...
----
-name:            splat_zero_nxv4i1
-legalized:       true
-regBankSelected: false
-body:             |
-  bb.1:
-    ; RV32I-LABEL: name: splat_zero_nxv4i1
-    ; RV32I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_VMCLR_VL $x0
-    ; RV32I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v0
-    ;
-    ; RV64I-LABEL: name: splat_zero_nxv4i1
-    ; RV64I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_VMCLR_VL $x0
-    ; RV64I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v0
-    %0:_(<vscale x 4 x s1>) = G_VMCLR_VL $x0
-    $v0 = COPY %0(<vscale x 4 x s1>)
-    PseudoRET implicit $v0
-
-...
----
-name:            splat_zero_nxv8i1
-legalized:       true
-regBankSelected: false
-body:             |
-  bb.1:
-    ; RV32I-LABEL: name: splat_zero_nxv8i1
-    ; RV32I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_VMCLR_VL $x0
-    ; RV32I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v0
-    ;
-    ; RV64I-LABEL: name: splat_zero_nxv8i1
-    ; RV64I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_VMCLR_VL $x0
-    ; RV64I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v0
-    %0:_(<vscale x 8 x s1>) = G_VMCLR_VL $x0
-    $v0 = COPY %0(<vscale x 8 x s1>)
-    PseudoRET implicit $v0
-
-...
----
-name:            splat_zero_nxv16i1
-legalized:       true
-regBankSelected: false
-body:             |
-  bb.1:
-    ; RV32I-LABEL: name: splat_zero_nxv16i1
-    ; RV32I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_VMCLR_VL $x0
-    ; RV32I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v0
-    ;
-    ; RV64I-LABEL: name: splat_zero_nxv16i1
-    ; RV64I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_VMCLR_VL $x0
-    ; RV64I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v0
-    %0:_(<vscale x 16 x s1>) = G_VMCLR_VL $x0
-    $v0 = COPY %0(<vscale x 16 x s1>)
-    PseudoRET implicit $v0
-
-...
----
-name:            splat_zero_nxv32i1
-legalized:       true
-regBankSelected: false
-body:             |
-  bb.1:
-    ; RV32I-LABEL: name: splat_zero_nxv32i1
-    ; RV32I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_VMCLR_VL $x0
-    ; RV32I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v0
-    ;
-    ; RV64I-LABEL: name: splat_zero_nxv32i1
-    ; RV64I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_VMCLR_VL $x0
-    ; RV64I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v0
-    %0:_(<vscale x 32 x s1>) = G_VMCLR_VL $x0
-    $v0 = COPY %0(<vscale x 32 x s1>)
-    PseudoRET implicit $v0
-
-...
----
-name:            splat_zero_nxv64i1
-legalized:       true
-regBankSelected: false
-body:             |
-  bb.1:
-    ; RV32I-LABEL: name: splat_zero_nxv64i1
-    ; RV32I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_VMCLR_VL $x0
-    ; RV32I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
-    ; RV32I-NEXT: PseudoRET implicit $v0
-    ;
-    ; RV64I-LABEL: name: splat_zero_nxv64i1
-    ; RV64I: [[VMCLR_VL:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_VMCLR_VL $x0
-    ; RV64I-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
-    ; RV64I-NEXT: PseudoRET implicit $v0
-    %0:_(<vscale x 64 x s1>) = G_VMCLR_VL $x0
-    $v0 = COPY %0(<vscale x 64 x s1>)
-    PseudoRET implicit $v0
-
-...
-

>From d58058e7821958971239d424adc14a02eccaa21f Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 1 Oct 2024 18:48:06 -0700
Subject: [PATCH 3/4] fixup! fix more tests

---
 .../legalizer/legalize-constbarrier-rv32.mir  |  5 +-
 .../legalizer/legalize-constbarrier-rv64.mir  |  5 +-
 .../rvv/legalize-extract-subvector.mir        | 86 +++++++++++--------
 3 files changed, 56 insertions(+), 40 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv32.mir
index bbe8ef4b092d3b..b1630d9c5f6e0d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv32.mir
@@ -52,8 +52,9 @@ name:            constbarrier_nxv2i1
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: constbarrier_nxv2i1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C]](s32)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL [[C]](s32)
     ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(<vscale x 2 x s1>) = G_CONSTANT_FOLD_BARRIER [[VMCLR_VL]]
     ; CHECK-NEXT: $v8 = COPY [[CONSTANT_FOLD_BARRIER]](<vscale x 2 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v8
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir
index 96b1aa53d46ea3..029205cc6bc224 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-constbarrier-rv64.mir
@@ -70,8 +70,9 @@ name:            constbarrier_nxv2i1
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: constbarrier_nxv2i1
-    ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
-    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C]](s64)
+    ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL [[C]](s64)
     ; CHECK-NEXT: [[CONSTANT_FOLD_BARRIER:%[0-9]+]]:_(<vscale x 2 x s1>) = G_CONSTANT_FOLD_BARRIER [[VMCLR_VL]]
     ; CHECK-NEXT: $v8 = COPY [[CONSTANT_FOLD_BARRIER]](<vscale x 2 x s1>)
     ; CHECK-NEXT: PseudoRET implicit $v8
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
index 7e172b27eecf1d..ca03482eee0d1d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-extract-subvector.mir
@@ -19,12 +19,13 @@ body:             |
     ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
     ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
     ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C2]](s32)
-    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C3]](s32)
     ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
-    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
     ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s8>), 0
-    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+    ; RV32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C4]](s32)
     ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
     ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
@@ -41,12 +42,13 @@ body:             |
     ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
     ; RV64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
-    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+    ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C3]](s64)
     ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s8>), 0
-    ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+    ; RV64-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
     ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
@@ -72,12 +74,13 @@ body:             |
     ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
     ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
     ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C2]](s32)
-    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C3]](s32)
     ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
     ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
-    ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C3]](s32)
+    ; RV32-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C4]](s32)
     ; RV32-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
     ; RV32-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
     ; RV32-NEXT: PseudoRET implicit $v8
@@ -94,12 +97,13 @@ body:             |
     ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
     ; RV64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C2]](s64)
-    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+    ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C3]](s64)
     ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), $x0, 3
+    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[SELECT]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 2 x s1>), [[C3]], 3
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
-    ; RV64-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C3]](s32)
+    ; RV64-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV64-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
     ; RV64-NEXT: [[SPLAT_VECTOR2:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT2]](s64)
     ; RV64-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[EXTRACT_SUBVECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR2]]
     ; RV64-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
@@ -162,9 +166,10 @@ body:             |
     ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
     ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
-    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C1]](s32)
     ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 4 x s1>), $x0, 3
+    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 4 x s1>), [[C1]], 3
     ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
     ; RV32-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>)
     ; RV32-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
@@ -176,9 +181,10 @@ body:             |
     ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
     ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
-    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C1]](s64)
     ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), $x0, 3
+    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 8 x s8>) = G_VSLIDEDOWN_VL [[DEF1]], [[BITCAST]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 4 x s1>), [[C1]], 3
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 8 x s8>), 0
     ; RV64-NEXT: [[BITCAST1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_BITCAST [[EXTRACT_SUBVECTOR]](<vscale x 4 x s8>)
     ; RV64-NEXT: $v8 = COPY [[BITCAST1]](<vscale x 32 x s1>)
@@ -319,8 +325,9 @@ body:             |
     ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
     ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
     ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
-    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
-    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s32)
+    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
     ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s8>), 0
     ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s8>)
     ; RV32-NEXT: PseudoRET implicit $v8
@@ -330,8 +337,9 @@ body:             |
     ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
     ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
     ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
-    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
-    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s64)
+    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s8>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s8>), 0
     ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s8>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -351,8 +359,9 @@ body:             |
     ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
     ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
     ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
-    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
-    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s32)
+    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
     ; RV32-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s16>), 0
     ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s16>)
     ; RV32-NEXT: PseudoRET implicit $v8
@@ -362,8 +371,9 @@ body:             |
     ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
     ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
-    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
-    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s64)
+    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 4 x s16>) = G_VSLIDEDOWN_VL [[DEF]], [[DEF]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 4 x s16>), 0
     ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR]](<vscale x 1 x s16>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -416,9 +426,10 @@ body:             |
     ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
     ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
     ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
-    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s32)
     ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
     ; RV32-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
     ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
@@ -429,9 +440,10 @@ body:             |
     ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
     ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
     ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
-    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s64)
     ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
     ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8
@@ -452,9 +464,10 @@ body:             |
     ; RV32-NEXT: [[READ_VLENB:%[0-9]+]]:_(s32) = G_READ_VLENB
     ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
     ; RV32-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[READ_VLENB]], [[C]](s32)
-    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+    ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; RV32-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s32)
     ; RV32-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+    ; RV32-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s32), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
     ; RV32-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
     ; RV32-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
     ; RV32-NEXT: PseudoRET implicit $v8
@@ -465,9 +478,10 @@ body:             |
     ; RV64-NEXT: [[READ_VLENB:%[0-9]+]]:_(s64) = G_READ_VLENB
     ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
     ; RV64-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[READ_VLENB]], [[C]](s64)
-    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+    ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; RV64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C1]](s64)
     ; RV64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), $x0, 3
+    ; RV64-NEXT: [[VSLIDEDOWN_VL:%[0-9]+]]:_(<vscale x 2 x s32>) = G_VSLIDEDOWN_VL [[DEF1]], [[EXTRACT_SUBVECTOR]], [[LSHR]](s64), [[VMSET_VL]](<vscale x 1 x s1>), [[C1]], 3
     ; RV64-NEXT: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 1 x s32>) = G_EXTRACT_SUBVECTOR [[VSLIDEDOWN_VL]](<vscale x 2 x s32>), 0
     ; RV64-NEXT: $v8 = COPY [[EXTRACT_SUBVECTOR1]](<vscale x 1 x s32>)
     ; RV64-NEXT: PseudoRET implicit $v8

>From 1a23dc70fe3832fb17550743fd0674734a87c7bb Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 1 Oct 2024 20:55:45 -0700
Subject: [PATCH 4/4] fixup! don't use VLMaxSentinel
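
For reference, RISCV::VLMaxSentinel is defined as -1 in RISCVInstrInfo.h,
so this fixup is value-preserving. The short sketch below is illustrative
(it reuses the XLenTy and MIB names from the patch) and simply shows that
both spellings build identical G_CONSTANTs:

  // From llvm/lib/Target/RISCV/RISCVInstrInfo.h:
  static constexpr int64_t VLMaxSentinel = -1LL;

  // Given the XLenTy and MIB in buildDefaultVLOps, these are equivalent;
  // the fixup just spells the constant with the literal.
  auto BySentinel = MIB.buildConstant(XLenTy, RISCV::VLMaxSentinel);
  auto ByLiteral = MIB.buildConstant(XLenTy, -1);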

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  2 +-
 .../rvv/legalize-splatvector-s64-rv32.mir     | 20 +++++++++++--------
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 412bf9b7bb3bc0..d1449f751b40a0 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -840,7 +840,7 @@ buildDefaultVLOps(const DstOp &Dst, MachineIRBuilder &MIB,
   assert(VecTy.isScalableVector() && "Expecting scalable container type");
   const RISCVSubtarget &STI = MIB.getMF().getSubtarget<RISCVSubtarget>();
   LLT XLenTy(STI.getXLenVT());
-  auto VL = MIB.buildConstant(XLenTy, RISCV::VLMaxSentinel);
+  auto VL = MIB.buildConstant(XLenTy, -1);
   auto Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
   return {Mask, VL};
 }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-s64-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-s64-rv32.mir
index 692dce3d9f071e..a80ffb70875470 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-s64-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-s64-rv32.mir
@@ -17,9 +17,10 @@ body:             |
     ;
     ; NoF64-LABEL: name: splatvector_nxv1i64
     ; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-    ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+    ; NoF64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL [[C]](s32)
     ; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
-    ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF1]], [[DEF]](s32), [[DEF]], $x0
+    ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF1]], [[DEF]](s32), [[DEF]], [[C]](s32)
     ; NoF64-NEXT: $v8 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 1 x s64>)
     ; NoF64-NEXT: PseudoRET implicit $v8
     %0:_(s64) = G_IMPLICIT_DEF
@@ -43,9 +44,10 @@ body:             |
     ;
     ; NoF64-LABEL: name: splatvector_nxv2i64
     ; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-    ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+    ; NoF64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL [[C]](s32)
     ; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
-    ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF1]], [[DEF]](s32), [[DEF]], $x0
+    ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF1]], [[DEF]](s32), [[DEF]], [[C]](s32)
     ; NoF64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 2 x s64>)
     ; NoF64-NEXT: PseudoRET implicit $v8m2
     %0:_(s64) = G_IMPLICIT_DEF
@@ -69,9 +71,10 @@ body:             |
     ;
     ; NoF64-LABEL: name: splatvector_nxv4i64
     ; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-    ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+    ; NoF64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL [[C]](s32)
     ; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
-    ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF1]], [[DEF]](s32), [[DEF]], $x0
+    ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF1]], [[DEF]](s32), [[DEF]], [[C]](s32)
     ; NoF64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 4 x s64>)
     ; NoF64-NEXT: PseudoRET implicit $v8m4
     %0:_(s64) = G_IMPLICIT_DEF
@@ -95,9 +98,10 @@ body:             |
     ;
     ; NoF64-LABEL: name: splatvector_nxv8i64
     ; NoF64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-    ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+    ; NoF64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; NoF64-NEXT: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL [[C]](s32)
     ; NoF64-NEXT: [[DEF1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
-    ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF1]], [[DEF]](s32), [[DEF]], $x0
+    ; NoF64-NEXT: [[SPLAT_VECTOR_SPLIT_I64_VL:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR_SPLIT_I64_VL [[DEF1]], [[DEF]](s32), [[DEF]], [[C]](s32)
     ; NoF64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR_SPLIT_I64_VL]](<vscale x 8 x s64>)
     ; NoF64-NEXT: PseudoRET implicit $v8m8
     %0:_(s64) = G_IMPLICIT_DEF


