[llvm] a020199 - [RISCV] Assert only valid AVLs in doLocalPostpass are X0 or virtual regs. NFC
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 17 03:35:08 PDT 2024
Author: Luke Lau
Date: 2024-04-17T18:34:56+08:00
New Revision: a02019960b1a693320cd43b0ed6653d95877b94f
URL: https://github.com/llvm/llvm-project/commit/a02019960b1a693320cd43b0ed6653d95877b94f
DIFF: https://github.com/llvm/llvm-project/commit/a02019960b1a693320cd43b0ed6653d95877b94f.diff
LOG: [RISCV] Assert only valid AVLs in doLocalPostpass are X0 or virtual regs. NFC
In vxrm.mir we were running RISCVInsertVSETVLI on pseudos that already had
vsetvlis inserted and had their AVLs set to $noreg. (This happened to work
since doLocalPostpass got rid of the extra vsetvli.)
This patch removes those vsetvlis from the test and enforces that the only
valid AVLs we work with are either X0 or virtual registers (or $noreg before
emitVSETVLIs), since we don't handle physical registers properly in
doLocalPostpass.
Added:
Modified:
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
llvm/test/CodeGen/RISCV/rvv/vxrm.mir
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index a54a1148cf28b9..6e45f0c703ceb8 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -468,6 +468,7 @@ class VSETVLIInfo {
bool isUnknown() const { return State == Unknown; }
void setAVLReg(Register Reg) {
+ assert(Reg.isVirtual() || Reg == RISCV::X0 || Reg == RISCV::NoRegister);
AVLReg = Reg;
State = AVLIsReg;
}
@@ -1514,12 +1515,9 @@ static bool canMutatePriorConfig(const MachineInstr &PrevMI,
// If the AVL is a register, we need to make sure MI's AVL dominates PrevMI.
// For now just check that PrevMI uses the same virtual register.
- if (AVL.isReg() && AVL.getReg() != RISCV::X0) {
- if (AVL.getReg().isPhysical())
- return false;
- if (!PrevAVL.isReg() || PrevAVL.getReg() != AVL.getReg())
- return false;
- }
+ if (AVL.isReg() && AVL.getReg() != RISCV::X0 &&
+ (!PrevAVL.isReg() || PrevAVL.getReg() != AVL.getReg()))
+ return false;
}
assert(PrevMI.getOperand(2).isImm() && MI.getOperand(2).isImm());
@@ -1543,9 +1541,9 @@ void RISCVInsertVSETVLI::doLocalPostpass(MachineBasicBlock &MBB) {
continue;
}
- Register VRegDef = MI.getOperand(0).getReg();
- if (VRegDef != RISCV::X0 &&
- !(VRegDef.isVirtual() && MRI->use_nodbg_empty(VRegDef)))
+ Register RegDef = MI.getOperand(0).getReg();
+ assert(RegDef == RISCV::X0 || RegDef.isVirtual());
+ if (RegDef != RISCV::X0 && !MRI->use_nodbg_empty(RegDef))
Used.demandVL();
if (NextMI) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm.mir b/llvm/test/CodeGen/RISCV/rvv/vxrm.mir
index 64e191887e092c..a588677bec8e2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm.mir
@@ -11,9 +11,9 @@ body: |
; MIR-LABEL: name: verify_vxrm
; MIR: liveins: $v8, $v9, $x10
; MIR-NEXT: {{ $}}
- ; MIR-NEXT: dead $x0 = PseudoVSETVLI renamable $x10, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype
+ ; MIR-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x10, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype
; MIR-NEXT: WriteVXRMImm 0, implicit-def $vxrm
- ; MIR-NEXT: renamable $v8 = PseudoVAADD_VV_MF8 undef $v8, renamable $v8, renamable $v9, 0, $noreg, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit $vxrm
+ ; MIR-NEXT: renamable $v8 = PseudoVAADD_VV_MF8 undef $v8, killed renamable $v8, killed renamable $v9, 0, $noreg, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype, implicit $vxrm
; MIR-NEXT: PseudoRET implicit $v8
; ASM-LABEL: verify_vxrm:
; ASM: # %bb.0:
@@ -23,8 +23,8 @@ body: |
; ASM-NEXT: ret
%0:vr = COPY $v8
%1:vr = COPY $v9
- dead $x0 = PseudoVSETVLI killed renamable $x10, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype
+ %2:gprnox0 = COPY $x10
%pt:vr = IMPLICIT_DEF
- renamable $v8 = PseudoVAADD_VV_MF8 %pt, killed renamable $v8, killed renamable $v9, 0, $noreg, 3 /* e8 */, 0
+ renamable $v8 = PseudoVAADD_VV_MF8 %pt, %0, %1, 0, %2, 3 /* e8 */, 0
PseudoRET implicit $v8
...
More information about the llvm-commits mailing list