[llvm] 75620fa - [RISCV] Change how we encode AVL operands in vector pseudoinstructions to use GPRNoX0.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Fri Sep 3 09:22:34 PDT 2021


Author: Craig Topper
Date: 2021-09-03T09:19:25-07:00
New Revision: 75620fadf5c391b6450eb658dea6194167f522f7

URL: https://github.com/llvm/llvm-project/commit/75620fadf5c391b6450eb658dea6194167f522f7
DIFF: https://github.com/llvm/llvm-project/commit/75620fadf5c391b6450eb658dea6194167f522f7.diff

LOG: [RISCV] Change how we encode AVL operands in vector pseudoinstructions to use GPRNoX0.

This patch changes the register class to avoid accidentally setting
the AVL operand to X0 through MachineIR optimizations.

There are cases where we really want to use X0, but we can't get that
past the MachineVerifier with the register class as GPRNoX0, so I've
used a 64-bit -1 as a sentinel for X0. All other immediate values
should be uimm5. The sentinel is converted to X0 at the earliest
possible point in the VSETVLI insertion pass to avoid touching the
rest of the algorithm. In SelectionDAG lowering I'm using a -1
TargetConstant to hide it from instruction selection and to treat it
differently from a -1 written by the user. A user -1 should be
selected to a register since it doesn't fit in uimm5.
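
For illustration, here is a minimal sketch of the lowering-side idea,
assuming a hypothetical helper name (the real logic lives in
getDefaultVLOps and the lowering functions in the diff below):

  // Sketch only: pickVL is a hypothetical helper, not part of this patch.
  // It shows how a TargetConstant -1 (the VLMax sentinel) stays distinct
  // from a -1 written by the user during instruction selection.
  static SDValue pickVL(bool UseVLMax, int64_t UserAVL, const SDLoc &DL,
                        SelectionDAG &DAG, MVT XLenVT) {
    if (UseVLMax)
      // TargetConstants are opaque to isel patterns, so this -1 can only
      // mean VLMAX. The VSETVLI insertion pass later rewrites it to X0.
      return DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
    // A user AVL of -1 doesn't fit in uimm5, so it stays a regular
    // constant and isel materializes it into a (non-X0) GPR.
    return DAG.getConstant(UserAVL, DL, XLenVT);
  }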

These are the rest of the changes started in D109110. As mentioned
there, I no longer have a failing test from MachineIR optimizations.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D109116

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.h
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
    llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
    llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
    llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e23844d8ec7c..94b1017311c6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1340,7 +1340,7 @@ getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
   MVT XLenVT = Subtarget.getXLenVT();
   SDValue VL = VecVT.isFixedLengthVector()
                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
-                   : DAG.getRegister(RISCV::X0, XLenVT);
+                   : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
   return {Mask, VL};
@@ -3292,7 +3292,7 @@ SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
 
   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
-                     DAG.getRegister(RISCV::X0, MVT::i64));
+                     DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
 }
 
 // Custom-lower extensions from mask vectors by using a vselect either with 1
@@ -4450,7 +4450,7 @@ SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
   } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
 
   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
@@ -4486,7 +4486,7 @@ SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
   } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
 
   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
   return DAG.getMemIntrinsicNode(
@@ -4743,7 +4743,7 @@ SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
 
     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
   } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
 
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
@@ -4824,7 +4824,7 @@ SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
 
     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
   } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
 
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;

diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 2d8d3bd892e4..d2a889449a09 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -430,10 +430,16 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
 
   if (RISCVII::hasVLOp(TSFlags)) {
     const MachineOperand &VLOp = MI.getOperand(NumOperands - 2);
-    if (VLOp.isImm())
-      InstrInfo.setAVLImm(VLOp.getImm());
-    else
+    if (VLOp.isImm()) {
+      int64_t Imm = VLOp.getImm();
+      // Convert the VLMax sentinel to the X0 register.
+      if (Imm == RISCV::VLMaxSentinel)
+        InstrInfo.setAVLReg(RISCV::X0);
+      else
+        InstrInfo.setAVLImm(Imm);
+    } else {
       InstrInfo.setAVLReg(VLOp.getReg());
+    }
   } else
     InstrInfo.setAVLReg(RISCV::NoRegister);
   InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index af2ea2228f1c..77c98aab6aa7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -181,6 +181,11 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
   const RISCVSubtarget &STI;
 };
 
+namespace RISCV {
+// Special immediate for AVL operand of V pseudo instructions to indicate VLMax.
+static constexpr int64_t VLMaxSentinel = -1LL;
+} // namespace RISCV
+
 namespace RISCVVPseudosTable {
 
 struct PseudoInfo {

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index d09d36962106..e8b430031c19 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -23,7 +23,7 @@ def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
 // Operand that is allowed to be a register or a 5 bit immediate.
 // This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
 // pseudo instructions.
-def AVL : RegisterOperand<GPR> {
+def AVL : RegisterOperand<GPRNoX0> {
   let OperandNamespace = "RISCVOp";
   let OperandType = "OPERAND_AVL";
 }
@@ -140,7 +140,9 @@ class octuple_to_str<int octuple> {
 def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;
 
 // Output pattern for X0 used to represent VLMAX in the pseudo instructions.
-def VLMax : OutPatFrag<(ops), (XLenVT X0)>;
+// We can't use the X0 register because the AVL operands use GPRNoX0.
+// This must be kept in sync with RISCV::VLMaxSentinel.
+def VLMax : OutPatFrag<(ops), (XLenVT -1)>;
 
 // List of EEW.
 defvar EEWList = [8, 16, 32, 64];

diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
index cadb66aa7d03..f1aad8d0b8c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
@@ -53,7 +53,7 @@ body: |
     ; CHECK: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
     ; CHECK: $x2 = frame-destroy ADDI $x2, 2032
     ; CHECK: PseudoRET
-    %1:gpr = COPY $x11
+    %1:gprnox0 = COPY $x11
     %0:gpr = COPY $x10
     %2:vr = PseudoVLE64_V_M1 %0, %1, 6 :: (load unknown-size from %ir.pa, align 8)
     %3:gpr = ADDI %stack.2, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
index 2d389e0cf49f..06343928c8e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
@@ -29,7 +29,7 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v0
     ; CHECK: [[COPY1:%[0-9]+]]:vrnov0 = COPY $v1
     ; CHECK: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v2
-    ; CHECK: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], $x0, 6, 1, implicit $vl, implicit $vtype
+    ; CHECK: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], -1, 6, 1, implicit $vl, implicit $vtype
     ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVNMSUB_VV_M1_]]
     ; CHECK: dead [[COPY2]]:vr = PseudoVSLL_VI_M1 [[COPY2]], 11, $noreg, 6, implicit $vl, implicit $vtype
     ; CHECK: $v0 = COPY [[PseudoVNMSUB_VV_M1_]]
@@ -37,7 +37,7 @@ body:             |
     %0:vr = COPY $v0
     %1:vrnov0 = COPY $v1
     %2:vrnov0 = COPY $v2
-    %0:vr = PseudoVNMSUB_VV_M1 %0, %1, killed %2, $x0, 6, 1, implicit $vl, implicit $vtype
+    %0:vr = PseudoVNMSUB_VV_M1 %0, %1, killed %2, -1, 6, 1, implicit $vl, implicit $vtype
     %3:vr = COPY %0
     %3:vr = PseudoVSLL_VI_M1 %3, 11, $noreg, 6, implicit $vl, implicit $vtype
     $v0 = COPY %0

diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
index 446186a9e8cc..93ed36abdd48 100644
--- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
@@ -52,7 +52,7 @@ body:             |
     ; CHECK: $v0 = COPY [[COPY]]
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
-    ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $x0, 6 :: (load (s512) from %ir.a, align 8)
+    ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 :: (load (s512) from %ir.a, align 8)
     ; CHECK: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
     ; CHECK: PseudoRET implicit $v8m8
     %1:vr = COPY $v0
@@ -60,7 +60,7 @@ body:             |
     $v0 = COPY %1
     %3:vrm8 = IMPLICIT_DEF
     %4:vrm8nov0 = COPY %3
-    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 6 :: (load (s512) from %ir.a, align 8)
+    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 6 :: (load (s512) from %ir.a, align 8)
     $v8m8 = COPY %2
     PseudoRET implicit $v8m8
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index 165ccc914552..da78be7b4b3a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -131,7 +131,7 @@ registers:
   - { id: 4, class: gpr }
   - { id: 5, class: gpr }
   - { id: 6, class: vr }
-  - { id: 7, class: gpr }
+  - { id: 7, class: gprnox0 }
   - { id: 8, class: gpr }
 liveins:
   - { reg: '$x10', virtual-reg: '%4' }
@@ -170,7 +170,7 @@ body:             |
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
     liveins: $x10, $x11, $v8, $x12
 
-    %7:gpr = COPY $x12
+    %7:gprnox0 = COPY $x12
     %6:vr = COPY $v8
     %5:gpr = COPY $x11
     %4:gpr = COPY $x10
@@ -204,7 +204,7 @@ registers:
   - { id: 4, class: gpr }
   - { id: 5, class: gpr }
   - { id: 6, class: gpr }
-  - { id: 7, class: gpr }
+  - { id: 7, class: gprnox0 }
   - { id: 8, class: gpr }
 liveins:
   - { reg: '$x10', virtual-reg: '%4' }
@@ -245,7 +245,7 @@ body:             |
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
     liveins: $x10, $x11, $x12, $x13
 
-    %7:gpr = COPY $x13
+    %7:gprnox0 = COPY $x13
     %6:gpr = COPY $x12
     %5:gpr = COPY $x11
     %4:gpr = COPY $x10
@@ -278,7 +278,7 @@ registers:
   - { id: 3, class: gpr }
   - { id: 4, class: vr }
   - { id: 5, class: vr }
-  - { id: 6, class: gpr }
+  - { id: 6, class: gprnox0 }
   - { id: 7, class: gpr }
   - { id: 8, class: gpr }
 liveins:
@@ -319,7 +319,7 @@ body:             |
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
     liveins: $x10, $v8, $v9, $x11
 
-    %6:gpr = COPY $x11
+    %6:gprnox0 = COPY $x11
     %5:vr = COPY $v9
     %4:vr = COPY $v8
     %3:gpr = COPY $x10
@@ -346,7 +346,7 @@ name:            vsetvli_add_or_sub
 alignment:       4
 tracksRegLiveness: true
 registers:
-  - { id: 0, class: gpr }
+  - { id: 0, class: gprnox0 }
   - { id: 1, class: vr }
   - { id: 2, class: vr }
   - { id: 3, class: vr }
@@ -372,7 +372,7 @@ body:             |
   ; CHECK:   [[COPY1:%[0-9]+]]:vr = COPY $v9
   ; CHECK:   [[COPY2:%[0-9]+]]:vr = COPY $v8
   ; CHECK:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK:   [[PseudoVSETVLI:%[0-9]+]]:gpr = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+  ; CHECK:   [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
   ; CHECK:   [[COPY4:%[0-9]+]]:gpr = COPY $x0
   ; CHECK:   BEQ [[COPY3]], [[COPY4]], %bb.2
   ; CHECK:   PseudoBR %bb.1
@@ -395,7 +395,7 @@ body:             |
     %6:vr = COPY $v9
     %5:vr = COPY $v8
     %4:gpr = COPY $x10
-    %0:gpr = PseudoVSETVLI %7, 88, implicit-def dead $vl, implicit-def dead $vtype
+    %0:gprnox0 = PseudoVSETVLI %7, 88, implicit-def dead $vl, implicit-def dead $vtype
     %8:gpr = COPY $x0
     BEQ %4, %8, %bb.2
     PseudoBR %bb.1

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index da2934ff3562..4fda0ad095ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -98,7 +98,7 @@ tracksRegLiveness: true
 registers:
   - { id: 0, class: vr }
   - { id: 1, class: vr }
-  - { id: 2, class: gpr }
+  - { id: 2, class: gprnox0 }
   - { id: 3, class: vr }
 liveins:
   - { reg: '$v8', virtual-reg: '%0' }
@@ -120,7 +120,7 @@ body:             |
     ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
     ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
     ; CHECK: PseudoRET implicit $v8
-    %2:gpr = COPY $x10
+    %2:gprnox0 = COPY $x10
     %1:vr = COPY $v9
     %0:vr = COPY $v8
     %3:vr = PseudoVADD_VV_M1 %0, %1, %2, 6
@@ -135,7 +135,7 @@ tracksRegLiveness: true
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: vr }
-  - { id: 2, class: gpr }
+  - { id: 2, class: gprnox0 }
   - { id: 3, class: vr }
   - { id: 4, class: vr }
 liveins:
@@ -159,7 +159,7 @@ body:             |
     ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
     ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
     ; CHECK: PseudoRET implicit $v8
-    %2:gpr = COPY $x11
+    %2:gprnox0 = COPY $x11
     %1:vr = COPY $v8
     %0:gpr = COPY $x10
     %3:vr = PseudoVLE64_V_M1 %0, %2, 6
@@ -174,7 +174,7 @@ alignment:       4
 tracksRegLiveness: true
 registers:
   - { id: 0, class: gpr }
-  - { id: 1, class: gpr }
+  - { id: 1, class: gprnox0 }
   - { id: 2, class: vr }
   - { id: 3, class: vr }
 liveins:
@@ -197,7 +197,7 @@ body:             |
     ; CHECK: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
     ; CHECK: $v8 = COPY %3
     ; CHECK: PseudoRET implicit $v8
-    %1:gpr = COPY $x11
+    %1:gprnox0 = COPY $x11
     %0:gpr = COPY $x10
     %2:vr = PseudoVLE32_V_MF2 %0, %1, 5
     early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed %2, %1, 6
@@ -299,7 +299,7 @@ body:             |
     ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
     ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
     ; CHECK: dead %6:gpr = PseudoVSETVLIX0 $x0, 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6, implicit $vl, implicit $vtype
     ; CHECK: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
     ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
     ; CHECK: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6, implicit $vl, implicit $vtype
@@ -308,7 +308,7 @@ body:             |
     ; CHECK: PseudoRET implicit $x10
     %0:gpr = COPY $x10
     %1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
-    %2:vr = PseudoVMV_V_I_M1 0, $x0, 6
+    %2:vr = PseudoVMV_V_I_M1 0, -1, 6
     %4:vr = IMPLICIT_DEF
     %3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6
     %5:gpr = PseudoVMV_X_S_M1 killed %3, 6
@@ -324,7 +324,7 @@ registers:
   - { id: 0, class: vr }
   - { id: 1, class: vr }
   - { id: 2, class: gprnox0 }
-  - { id: 3, class: gpr }
+  - { id: 3, class: gprnox0 }
   - { id: 4, class: vr }
 liveins:
   - { reg: '$v8', virtual-reg: '%0' }
@@ -342,14 +342,14 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
     ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v9
     ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY $v8
-    ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gpr = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
     ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
     ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
     ; CHECK: PseudoRET implicit $v8
     %2:gprnox0 = COPY $x10
     %1:vr = COPY $v9
     %0:vr = COPY $v8
-    %3:gpr = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
+    %3:gprnox0 = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
     %4:vr = PseudoVADD_VV_M1 %0, %1, killed %3, 6
     $v8 = COPY %4
     PseudoRET implicit $v8
@@ -362,7 +362,7 @@ tracksRegLiveness: true
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: vr }
-  - { id: 2, class: gpr }
+  - { id: 2, class: gprnox0 }
   - { id: 3, class: vr }
   - { id: 4, class: vr }
 liveins:
@@ -388,7 +388,7 @@ body:             |
     ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
     ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
     ; CHECK: PseudoRET implicit $v8
-    %2:gpr = COPY $x11
+    %2:gprnox0 = COPY $x11
     %1:vr = COPY $v8
     %0:gpr = COPY $x10
     %3:vr = PseudoVLE64_V_M1 %0, %2, 6

diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
index f092c1e81995..c8ec11341d67 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -40,7 +40,7 @@ body: |
     ; CHECK: $x2 = frame-destroy ADDI $x2, 16
     ; CHECK: PseudoRET
     %0:gpr = COPY $x10
-    %1:gpr = COPY $x11
+    %1:gprnox0 = COPY $x11
     $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 %0, %1, 6
     PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, %stack.0, $x0
     renamable $v7_v8_v9_v10_v11_v12_v13 = PseudoVRELOAD7_M1 %stack.0, $x0
