[llvm] [RISCV][MachineCombiner] Add reassociation optimizations for RVV instructions (PR #88307)
Min-Yih Hsu via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 25 09:26:30 PDT 2024
https://github.com/mshockwave updated https://github.com/llvm/llvm-project/pull/88307
From 69ed35fd27371cb221530fb1b53929fedbd5188e Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Mon, 8 Apr 2024 12:23:52 -0700
Subject: [PATCH 1/9] [RISCV][MachineCombiner] Pre-commit test for RVV
reassociations
---
.../RISCV/rvv/vector-reassociations.ll | 254 ++++++++++++++++++
1 file changed, 254 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
new file mode 100644
index 00000000000000..3cb6f3c35286cf
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
@@ -0,0 +1,254 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr='+v' -O3 %s -o - | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32)
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32, i32)
+
+declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32)
+
+declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32)
+
+define <vscale x 1 x i8> @simple_vadd_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: simple_vadd_vv:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vadd.vv v9, v8, v9
+; CHECK-NEXT: vadd.vv v9, v8, v9
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %a,
+ i32 %2)
+
+ %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %b,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @simple_vadd_vsub_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: simple_vadd_vsub_vv:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsub.vv v9, v8, v9
+; CHECK-NEXT: vadd.vv v9, v8, v9
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %a,
+ i32 %2)
+
+ %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %b,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @simple_vmul_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: simple_vmul_vv:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vmul.vv v9, v8, v9
+; CHECK-NEXT: vmul.vv v9, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ %b = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %a,
+ i32 %2)
+
+ %c = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %b,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %c
+}
+
+; With passthru and masks.
+define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: vadd_vv_passthru:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vadd.vv v10, v8, v9
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vadd.vv v9, v8, v10
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %a,
+ i32 %2)
+
+ %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %b,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @vadd_vv_passthru_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: vadd_vv_passthru_negative:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vadd.vv v10, v8, v9
+; CHECK-NEXT: vadd.vv v9, v8, v10
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %a,
+ i32 %2)
+
+ %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %b,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
+; CHECK-LABEL: vadd_vv_mask:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vadd.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %m,
+ i32 %2, i32 1)
+
+ %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %a,
+ <vscale x 1 x i1> %m,
+ i32 %2, i32 1)
+
+ %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %b,
+ <vscale x 1 x i1> %m,
+ i32 %2, i32 1)
+
+ ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
+; CHECK-LABEL: vadd_vv_mask_negative:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vadd.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i1> %m,
+ i32 %2, i32 1)
+
+ %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %a,
+ <vscale x 1 x i1> %m,
+ i32 %2, i32 1)
+
+ %splat = insertelement <vscale x 1 x i1> poison, i1 1, i32 0
+ %m2 = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %b,
+ <vscale x 1 x i1> %m2,
+ i32 %2, i32 1)
+
+ ret <vscale x 1 x i8> %c
+}
+
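For context before the implementation patch below: machine-combiner reassociation shortens the critical path of a chain of associative operations. A scalar C++ analogue of the vadd chains these tests exercise (an illustration only, with made-up function names; not the actual RVV transform):

#include <cstdio>

// Before reassociation: each add consumes the previous result, so the
// three adds form a dependence chain of depth 3.
static int chainSerial(int x0, int x1) {
  int a = x0 + x1;
  int b = x0 + a;
  int c = x0 + b;
  return c;
}

// After reassociation: x0 + x0 does not depend on a, so two of the adds
// can execute in parallel and the chain depth drops to 2.
static int chainReassoc(int x0, int x1) {
  int a = x0 + x1;
  int b = x0 + x0;
  int c = b + a;
  return c;
}

int main() {
  // Both compute 3*x0 + x1; only the dataflow shape differs.
  std::printf("%d %d\n", chainSerial(3, 4), chainReassoc(3, 4));
}

The CHECK-line updates in the next patch show exactly this reshaping: the middle vadd.vv becomes v8, v8, v8 and its result feeds the final add.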
From 6ac5a84c320e265b47158e936c8458a60811356a Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Wed, 10 Apr 2024 11:10:26 -0700
Subject: [PATCH 2/9] [RISCV][MachineCombiner] Add reassociation optimizations
for RVV instructions
This patch covers VADD_VV and VMUL_VV.
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 220 ++++++++++++++++++
llvm/lib/Target/RISCV/RISCVInstrInfo.h | 14 ++
.../RISCV/rvv/vector-reassociations.ll | 14 +-
3 files changed, 241 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 70ac1f8a592e02..14a4e7aa181b3b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1626,8 +1626,184 @@ static bool isFMUL(unsigned Opc) {
}
}
+bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
+ bool Invert) const {
+#define OPCODE_LMUL_CASE(OPC) \
+ case RISCV::OPC##_M1: \
+ case RISCV::OPC##_M2: \
+ case RISCV::OPC##_M4: \
+ case RISCV::OPC##_M8: \
+ case RISCV::OPC##_MF2: \
+ case RISCV::OPC##_MF4: \
+ case RISCV::OPC##_MF8
+
+#define OPCODE_LMUL_MASK_CASE(OPC) \
+ case RISCV::OPC##_M1_MASK: \
+ case RISCV::OPC##_M2_MASK: \
+ case RISCV::OPC##_M4_MASK: \
+ case RISCV::OPC##_M8_MASK: \
+ case RISCV::OPC##_MF2_MASK: \
+ case RISCV::OPC##_MF4_MASK: \
+ case RISCV::OPC##_MF8_MASK
+
+ unsigned Opcode = Inst.getOpcode();
+ if (Invert) {
+ if (auto InvOpcode = getInverseOpcode(Opcode))
+ Opcode = *InvOpcode;
+ else
+ return false;
+ }
+
+ // clang-format off
+ switch (Opcode) {
+ default:
+ return false;
+ OPCODE_LMUL_CASE(PseudoVADD_VV):
+ OPCODE_LMUL_MASK_CASE(PseudoVADD_VV):
+ OPCODE_LMUL_CASE(PseudoVMUL_VV):
+ OPCODE_LMUL_MASK_CASE(PseudoVMUL_VV):
+ OPCODE_LMUL_CASE(PseudoVMULH_VV):
+ OPCODE_LMUL_MASK_CASE(PseudoVMULH_VV):
+ OPCODE_LMUL_CASE(PseudoVMULHU_VV):
+ OPCODE_LMUL_MASK_CASE(PseudoVMULHU_VV):
+ return true;
+ }
+ // clang-format on
+
+#undef OPCODE_LMUL_MASK_CASE
+#undef OPCODE_LMUL_CASE
+}
+
+bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
+ const MachineInstr &MI2) const {
+ if (!areOpcodesEqualOrInverse(MI1.getOpcode(), MI2.getOpcode()))
+ return false;
+
+ // Make sure vtype operands are also the same.
+ const MCInstrDesc &Desc = get(MI1.getOpcode());
+ const uint64_t TSFlags = Desc.TSFlags;
+
+ auto checkImmOperand = [&](unsigned OpIdx) {
+ return MI1.getOperand(OpIdx).getImm() == MI2.getOperand(OpIdx).getImm();
+ };
+
+ auto checkRegOperand = [&](unsigned OpIdx) {
+ return MI1.getOperand(OpIdx).getReg() == MI2.getOperand(OpIdx).getReg();
+ };
+
+ // PassThru
+ if (!checkRegOperand(1))
+ return false;
+
+ // SEW
+ if (RISCVII::hasSEWOp(TSFlags) &&
+ !checkImmOperand(RISCVII::getSEWOpNum(Desc)))
+ return false;
+
+ // Mask
+ // There might be more sophisticated ways to check equality of masks, but
+ // right now we simply check if they're the same virtual register.
+ if (RISCVII::usesMaskPolicy(TSFlags) && !checkRegOperand(4))
+ return false;
+
+ // Tail / Mask policies
+ if (RISCVII::hasVecPolicyOp(TSFlags) &&
+ !checkImmOperand(RISCVII::getVecPolicyOpNum(Desc)))
+ return false;
+
+ // VL
+ if (RISCVII::hasVLOp(TSFlags)) {
+ unsigned OpIdx = RISCVII::getVLOpNum(Desc);
+ const MachineOperand &Op1 = MI1.getOperand(OpIdx);
+ const MachineOperand &Op2 = MI2.getOperand(OpIdx);
+ if (Op1.getType() != Op2.getType())
+ return false;
+ switch (Op1.getType()) {
+ case MachineOperand::MO_Register:
+ if (Op1.getReg() != Op2.getReg())
+ return false;
+ break;
+ case MachineOperand::MO_Immediate:
+ if (Op1.getImm() != Op2.getImm())
+ return false;
+ break;
+ default:
+ llvm_unreachable("Unrecognized VL operand type");
+ }
+ }
+
+ // Rounding modes
+ if (RISCVII::hasRoundModeOp(TSFlags) &&
+ !checkImmOperand(RISCVII::getVLOpNum(Desc) - 1))
+ return false;
+
+ return true;
+}
+
+// Most of our RVV pseudo has passthru operand, so the real operands
+// start from index = 2.
+bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
+ bool &Commuted) const {
+ const MachineBasicBlock *MBB = Inst.getParent();
+ const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
+ MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(3).getReg());
+
+ // If only one operand has the same or inverse opcode and it's the second
+ // source operand, the operands must be commuted.
+ Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
+ areRVVInstsReassociable(Inst, *MI2);
+ if (Commuted)
+ std::swap(MI1, MI2);
+
+ return areRVVInstsReassociable(Inst, *MI1) &&
+ (isVectorAssociativeAndCommutative(*MI1) ||
+ isVectorAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
+ hasReassociableOperands(*MI1, MBB) &&
+ MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
+}
+
+bool RISCVInstrInfo::hasReassociableOperands(
+ const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
+ if (!isVectorAssociativeAndCommutative(Inst) &&
+ !isVectorAssociativeAndCommutative(Inst, /*Invert=*/true))
+ return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
+
+ const MachineOperand &Op1 = Inst.getOperand(2);
+ const MachineOperand &Op2 = Inst.getOperand(3);
+ const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+
+ // We need virtual register definitions for the operands that we will
+ // reassociate.
+ MachineInstr *MI1 = nullptr;
+ MachineInstr *MI2 = nullptr;
+ if (Op1.isReg() && Op1.getReg().isVirtual())
+ MI1 = MRI.getUniqueVRegDef(Op1.getReg());
+ if (Op2.isReg() && Op2.getReg().isVirtual())
+ MI2 = MRI.getUniqueVRegDef(Op2.getReg());
+
+ // And at least one operand must be defined in MBB.
+ return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
+}
+
+void RISCVInstrInfo::getReassociateOperandIndices(
+ const MachineInstr &Root, unsigned Pattern,
+ std::array<unsigned, 5> &OperandIndices) const {
+ TargetInstrInfo::getReassociateOperandIndices(Root, Pattern, OperandIndices);
+ if (isVectorAssociativeAndCommutative(Root) ||
+ isVectorAssociativeAndCommutative(Root, /*Invert=*/true)) {
+ // Skip the passthrough operand, so add all indices by one.
+ for (unsigned I = 0; I < 5; ++I)
+ ++OperandIndices[I];
+ }
+}
+
bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
bool &Commuted) const {
+ if (isVectorAssociativeAndCommutative(Inst) ||
+ isVectorAssociativeAndCommutative(Inst, /*Invert=*/true))
+ return hasReassociableVectorSibling(Inst, Commuted);
+
if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
return false;
@@ -1647,6 +1823,9 @@ bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
bool Invert) const {
+ if (isVectorAssociativeAndCommutative(Inst, Invert))
+ return true;
+
unsigned Opc = Inst.getOpcode();
if (Invert) {
auto InverseOpcode = getInverseOpcode(Opc);
@@ -1699,6 +1878,38 @@ bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
+#define RVV_OPC_LMUL_CASE(OPC, INV) \
+ case RISCV::OPC##_M1: \
+ return RISCV::INV##_M1; \
+ case RISCV::OPC##_M2: \
+ return RISCV::INV##_M2; \
+ case RISCV::OPC##_M4: \
+ return RISCV::INV##_M4; \
+ case RISCV::OPC##_M8: \
+ return RISCV::INV##_M8; \
+ case RISCV::OPC##_MF2: \
+ return RISCV::INV##_MF2; \
+ case RISCV::OPC##_MF4: \
+ return RISCV::INV##_MF4; \
+ case RISCV::OPC##_MF8: \
+ return RISCV::INV##_MF8
+
+#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
+ case RISCV::OPC##_M1_MASK: \
+ return RISCV::INV##_M1_MASK; \
+ case RISCV::OPC##_M2_MASK: \
+ return RISCV::INV##_M2_MASK; \
+ case RISCV::OPC##_M4_MASK: \
+ return RISCV::INV##_M4_MASK; \
+ case RISCV::OPC##_M8_MASK: \
+ return RISCV::INV##_M8_MASK; \
+ case RISCV::OPC##_MF2_MASK: \
+ return RISCV::INV##_MF2_MASK; \
+ case RISCV::OPC##_MF4_MASK: \
+ return RISCV::INV##_MF4_MASK; \
+ case RISCV::OPC##_MF8_MASK: \
+ return RISCV::INV##_MF8_MASK
+
switch (Opcode) {
default:
return std::nullopt;
@@ -1722,7 +1933,16 @@ RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
return RISCV::SUBW;
case RISCV::SUBW:
return RISCV::ADDW;
+ // clang-format off
+ RVV_OPC_LMUL_CASE(PseudoVADD_VV, PseudoVSUB_VV);
+ RVV_OPC_LMUL_MASK_CASE(PseudoVADD_VV, PseudoVSUB_VV);
+ RVV_OPC_LMUL_CASE(PseudoVSUB_VV, PseudoVADD_VV);
+ RVV_OPC_LMUL_MASK_CASE(PseudoVSUB_VV, PseudoVADD_VV);
+ // clang-format on
}
+
+#undef RVV_OPC_LMUL_MASK_CASE
+#undef RVV_OPC_LMUL_CASE
}
static bool canCombineFPFusedMultiply(const MachineInstr &Root,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 70fe7da85be0e7..4dc30a042b7672 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -266,6 +266,9 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
+ bool hasReassociableOperands(const MachineInstr &Inst,
+ const MachineBasicBlock *MBB) const override;
+
bool hasReassociableSibling(const MachineInstr &Inst,
bool &Commuted) const override;
@@ -274,6 +277,10 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
std::optional<unsigned> getInverseOpcode(unsigned Opcode) const override;
+ void getReassociateOperandIndices(
+ const MachineInstr &Root, unsigned Pattern,
+ std::array<unsigned, 5> &OperandIndices) const override;
+
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
getSerializableMachineMemOperandTargetFlags() const override;
@@ -297,6 +304,13 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
private:
unsigned getInstBundleLength(const MachineInstr &MI) const;
+
+ bool isVectorAssociativeAndCommutative(const MachineInstr &MI,
+ bool Invert = false) const;
+ bool areRVVInstsReassociable(const MachineInstr &MI1,
+ const MachineInstr &MI2) const;
+ bool hasReassociableVectorSibling(const MachineInstr &Inst,
+ bool &Commuted) const;
};
namespace RISCV {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
index 3cb6f3c35286cf..7c3d48c3e48a73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @simple_vadd_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8>
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vv v9, v8, v9
-; CHECK-NEXT: vadd.vv v9, v8, v9
+; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
@@ -61,7 +61,7 @@ define <vscale x 1 x i8> @simple_vadd_vsub_vv(<vscale x 1 x i8> %0, <vscale x 1
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsub.vv v9, v8, v9
-; CHECK-NEXT: vadd.vv v9, v8, v9
+; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
@@ -91,7 +91,7 @@ define <vscale x 1 x i8> @simple_vmul_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8>
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmul.vv v9, v8, v9
-; CHECK-NEXT: vmul.vv v9, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v8
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
@@ -124,8 +124,8 @@ define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vadd.vv v10, v8, v9
; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vadd.vv v9, v8, v10
-; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vadd.vv v9, v8, v8
+; CHECK-NEXT: vadd.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
@@ -187,8 +187,8 @@ define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vadd.vv v9, v8, v8, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
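A note on the OPCODE_LMUL_CASE and RVV_OPC_LMUL_CASE macro families added above: each use stamps out one case label (or case-plus-return) per LMUL variant, M1 through MF8, of a pseudo opcode. A minimal standalone sketch of the same preprocessor pattern, with a hypothetical enum standing in for the real RISCV:: opcodes:

#include <cstdio>

// Hypothetical stand-ins for PseudoVADD_VV_M1 ... PseudoVADD_VV_MF8.
enum Opcode {
  VADD_M1, VADD_M2, VADD_M4, VADD_M8,
  VADD_MF2, VADD_MF4, VADD_MF8,
  OTHER
};

// One macro use expands to all seven LMUL case labels; the caller
// supplies the final colon, exactly as in the patch.
#define OPCODE_LMUL_CASE(OPC) \
  case OPC##_M1: \
  case OPC##_M2: \
  case OPC##_M4: \
  case OPC##_M8: \
  case OPC##_MF2: \
  case OPC##_MF4: \
  case OPC##_MF8

static bool isVAddVV(Opcode Op) {
  switch (Op) {
  OPCODE_LMUL_CASE(VADD):
    return true;
  default:
    return false;
  }
}

int main() {
  std::printf("%d %d\n", isVAddVV(VADD_MF4), isVAddVV(OTHER)); // prints 1 0
}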
From 18c4a08298805e82a779c8c3c23f0a51e0786cd6 Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Wed, 10 Apr 2024 13:43:51 -0700
Subject: [PATCH 3/9] fixup! [RISCV][MachineCombiner] Add reassociation
optimizations for RVV instructions
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 14a4e7aa181b3b..02dac321722a4f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1662,10 +1662,6 @@ bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
OPCODE_LMUL_MASK_CASE(PseudoVADD_VV):
OPCODE_LMUL_CASE(PseudoVMUL_VV):
OPCODE_LMUL_MASK_CASE(PseudoVMUL_VV):
- OPCODE_LMUL_CASE(PseudoVMULH_VV):
- OPCODE_LMUL_MASK_CASE(PseudoVMULH_VV):
- OPCODE_LMUL_CASE(PseudoVMULHU_VV):
- OPCODE_LMUL_MASK_CASE(PseudoVMULHU_VV):
return true;
}
// clang-format on
@@ -1740,7 +1736,7 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
return true;
}
-// Most of our RVV pseudo has passthru operand, so the real operands
+// Most of our RVV pseudos have passthru operand, so the real operands
// start from index = 2.
bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
bool &Commuted) const {
@@ -1792,7 +1788,7 @@ void RISCVInstrInfo::getReassociateOperandIndices(
TargetInstrInfo::getReassociateOperandIndices(Root, Pattern, OperandIndices);
if (isVectorAssociativeAndCommutative(Root) ||
isVectorAssociativeAndCommutative(Root, /*Invert=*/true)) {
- // Skip the passthrough operand, so add all indices by one.
+ // Skip the passthrough operand, so increment all indices by one.
for (unsigned I = 0; I < 5; ++I)
++OperandIndices[I];
}
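The fixup above drops VMULH and VMULHU from the associative set, which looks right: keeping only the high half of each product is not an associative operation, so reassociating such chains would change results. A quick 8-bit C++ check (my own example, not from the patch; values chosen to expose the mismatch, mirroring vmulhu at SEW=8):

#include <cstdint>
#include <cstdio>

// High half of an 8x8 -> 16 unsigned multiply, like vmulhu.vv at SEW=8.
static uint8_t mulhu8(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((static_cast<uint16_t>(a) * b) >> 8);
}

int main() {
  uint8_t a = 100, b = 150, c = 250;
  // Prints "56 57": (a *h b) *h c != a *h (b *h c), so VMULH[U] chains
  // must not be reassociated.
  std::printf("%d %d\n", mulhu8(mulhu8(a, b), c), mulhu8(a, mulhu8(b, c)));
}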
From 9af8047b25da344a6bcb4d5756fcc71d4b5ee0bd Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Thu, 11 Apr 2024 11:35:41 -0700
Subject: [PATCH 4/9] Check the definition of mask operand (i.e. V0)
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 51 +++++++++++++++++--
.../RISCV/rvv/vector-reassociations.ll | 13 +++--
2 files changed, 53 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 02dac321722a4f..f26e5236280f6d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1675,6 +1675,10 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
if (!areOpcodesEqualOrInverse(MI1.getOpcode(), MI2.getOpcode()))
return false;
+ assert(MI1.getMF() == MI2.getMF());
+ const MachineRegisterInfo *MRI = &MI1.getMF()->getRegInfo();
+ const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
+
// Make sure vtype operands are also the same.
const MCInstrDesc &Desc = get(MI1.getOpcode());
const uint64_t TSFlags = Desc.TSFlags;
@@ -1697,10 +1701,49 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
return false;
// Mask
- // There might be more sophisticated ways to check equality of masks, but
- // right now we simply check if they're the same virtual register.
- if (RISCVII::usesMaskPolicy(TSFlags) && !checkRegOperand(4))
- return false;
+ if (RISCVII::usesMaskPolicy(TSFlags)) {
+ const MachineBasicBlock *MBB = MI1.getParent();
+ const MachineBasicBlock::const_reverse_iterator It1(&MI1);
+ const MachineBasicBlock::const_reverse_iterator It2(&MI2);
+ Register MI1VReg;
+
+ bool SeenMI2 = false;
+ for (auto End = MBB->rend(), It = It1; It != End; ++It) {
+ if (It == It2) {
+ SeenMI2 = true;
+ if (!MI1VReg.isValid())
+ // There is no V0 def between MI1 and MI2; they're sharing the
+ // same V0.
+ break;
+ }
+
+ if (It->definesRegister(RISCV::V0, TRI)) {
+ Register SrcReg =
+ TRI->lookThruCopyLike(It->getOperand(1).getReg(), MRI);
+
+ if (!MI1VReg.isValid()) {
+ // This is the V0 def for MI1.
+ MI1VReg = SrcReg;
+ continue;
+ }
+
+ // An intervening V0 update that neither instruction reads; keep scanning.
+ if (!SeenMI2)
+ continue;
+
+ // This is the V0 def for MI2; check if it's the same as that of
+ // MI1.
+ if (MI1VReg != SrcReg)
+ return false;
+ else
+ break;
+ }
+ }
+
+ // If we haven't encountered MI2, it's likely that this function was
+ // called incorrectly (e.g. MI1 is before MI2).
+ assert(SeenMI2 && "MI2 is expected to appear before MI1");
+ }
// Tail / Mask policies
if (RISCVII::hasVecPolicyOp(TSFlags) &&
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
index 7c3d48c3e48a73..6435c1c14e061e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll
@@ -215,15 +215,16 @@ entry:
ret <vscale x 1 x i8> %c
}
-define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
+define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m, <vscale x 1 x i1> %m2) nounwind {
; CHECK-LABEL: vadd_vv_mask_negative:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vadd.vv v10, v8, v9, v0.t
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vadd.vv v11, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vadd.vv v9, v8, v11, v0.t
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
@@ -240,8 +241,6 @@ entry:
<vscale x 1 x i1> %m,
i32 %2, i32 1)
- %splat = insertelement <vscale x 1 x i1> poison, i1 1, i32 0
- %m2 = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
%c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %0,
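The mask check added in this patch hinges on masked RVV pseudos always reading their mask from the physical register V0: two instructions use the same mask exactly when the V0 definition live at each of them copies from the same virtual register. A simplified standalone model of that backward scan (the Inst type and indices are hypothetical; the real code walks MachineBasicBlock iterators and, at this point in the series, still looks through copy-like instructions):

#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

// Hypothetical, simplified instruction model: an instruction either
// defines V0 from some virtual register or leaves V0 alone.
struct Inst {
  bool DefinesV0 = false;
  int V0SrcVReg = -1; // meaningful only when DefinesV0 is true
};

// Walk backwards from Root (at RootIdx) towards the block start, passing
// Prev (at PrevIdx < RootIdx), and decide whether both instructions read
// the same live V0 definition.
static bool sameMask(const std::vector<Inst> &Block, std::size_t PrevIdx,
                     std::size_t RootIdx) {
  std::optional<int> RootV0; // source of the V0 def that is live at Root
  bool SeenPrev = false;
  for (std::size_t I = RootIdx + 1; I-- > 0;) {
    if (I == PrevIdx) {
      SeenPrev = true;
      if (!RootV0)
        return true; // no V0 def between them: they share the same mask
    }
    if (Block[I].DefinesV0) {
      if (!RootV0) {
        RootV0 = Block[I].V0SrcVReg; // the def live at Root
        continue;
      }
      if (!SeenPrev)
        continue; // a V0 update that neither instruction reads
      // First V0 def above Prev: compare its source with Root's.
      return *RootV0 == Block[I].V0SrcVReg;
    }
  }
  return false; // conservative: Prev's V0 def was never found
}

int main() {
  // One V0 def from vreg 7, then two masked instructions (Prev, Root):
  // they share the same mask, so this prints 1.
  std::vector<Inst> Block = {{true, 7}, {false, -1}, {false, -1}};
  std::printf("%d\n", sameMask(Block, 1, 2));
}

Later commits in this series refine the real version: patch 6 bails out when the V0 source is not a virtual register, and patch 8 switches from definesRegister to modifiesRegister.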
From 9ae6a4d880b2690ef40e8e7aeedba9596da5d5ab Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Mon, 15 Apr 2024 13:49:01 -0700
Subject: [PATCH 5/9] Address reviewer's comments
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 2 ++
1 file changed, 2 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index f26e5236280f6d..b1a418af94f9b5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1785,6 +1785,8 @@ bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
bool &Commuted) const {
const MachineBasicBlock *MBB = Inst.getParent();
const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+ assert(RISCVII::isFirstDefTiedToFirstUse(get(Inst.getOpcode())) &&
+ "Expect the present of passthrough operand.");
MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(3).getReg());
From 9111bcf3b07d31ebef0bd88bcbfbb92242458e72 Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Tue, 16 Apr 2024 09:51:07 -0700
Subject: [PATCH 6/9] fixup! Address reviewer's comments
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index b1a418af94f9b5..fe23a42c6c81a3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1718,8 +1718,11 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
}
if (It->definesRegister(RISCV::V0, TRI)) {
- Register SrcReg =
- TRI->lookThruCopyLike(It->getOperand(1).getReg(), MRI);
+ Register SrcReg = It->getOperand(1).getReg();
+ // If it's not a virtual register it'll be more difficult to track its
+ // defs, so bail out here just to be safe.
+ if (!SrcReg.isVirtual())
+ return false;
if (!MI1VReg.isValid()) {
// This is the V0 def for MI1.
From 384cfdeec0057140706b4ba7c39e983c8f8d5320 Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Tue, 23 Apr 2024 11:56:42 -0700
Subject: [PATCH 7/9] fixup! fixup! Address reviewer's comments
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index fe23a42c6c81a3..c9777e83b59d83 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1692,6 +1692,9 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
};
// PassThru
+ // TODO: Potentially we can loosen the condition to consider Root (MI1) to be
+ // associable with Prev (MI2) if Root has NoReg as passthru, in which case we
+ // would also need to loosen the condition on vector policies between them.
if (!checkRegOperand(1))
return false;
From 91f6dded52e2ea9fb12da0bd030311477698c8a8 Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Wed, 24 Apr 2024 13:56:22 -0700
Subject: [PATCH 8/9] Address review comments
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index c9777e83b59d83..eb01427c106f8e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1720,7 +1720,7 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
break;
}
- if (It->definesRegister(RISCV::V0, TRI)) {
+ if (It->modifiesRegister(RISCV::V0, TRI)) {
Register SrcReg = It->getOperand(1).getReg();
// If it's not a virtual register it'll be more difficult to track its
// defs, so bail out here just to be safe.
@@ -1837,8 +1837,7 @@ void RISCVInstrInfo::getReassociateOperandIndices(
const MachineInstr &Root, unsigned Pattern,
std::array<unsigned, 5> &OperandIndices) const {
TargetInstrInfo::getReassociateOperandIndices(Root, Pattern, OperandIndices);
- if (isVectorAssociativeAndCommutative(Root) ||
- isVectorAssociativeAndCommutative(Root, /*Invert=*/true)) {
+ if (RISCV::getRVVMCOpcode(Root.getOpcode())) {
// Skip the passthrough operand, so increment all indices by one.
for (unsigned I = 0; I < 5; ++I)
++OperandIndices[I];
From 4aab02fd0ed2e470e522fceeb4defcc36dad17a1 Mon Sep 17 00:00:00 2001
From: Min Hsu <min.hsu at sifive.com>
Date: Thu, 25 Apr 2024 09:23:13 -0700
Subject: [PATCH 9/9] Rename MI1/2 to Root/Prev in areRVVInstsReassociable
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 44 ++++++++++++------------
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index eb01427c106f8e..6c089d1774f40f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1670,30 +1670,30 @@ bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
#undef OPCODE_LMUL_CASE
}
-bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
- const MachineInstr &MI2) const {
- if (!areOpcodesEqualOrInverse(MI1.getOpcode(), MI2.getOpcode()))
+bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
+ const MachineInstr &Prev) const {
+ if (!areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()))
return false;
- assert(MI1.getMF() == MI2.getMF());
- const MachineRegisterInfo *MRI = &MI1.getMF()->getRegInfo();
+ assert(Root.getMF() == Prev.getMF());
+ const MachineRegisterInfo *MRI = &Root.getMF()->getRegInfo();
const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
// Make sure vtype operands are also the same.
- const MCInstrDesc &Desc = get(MI1.getOpcode());
+ const MCInstrDesc &Desc = get(Root.getOpcode());
const uint64_t TSFlags = Desc.TSFlags;
auto checkImmOperand = [&](unsigned OpIdx) {
- return MI1.getOperand(OpIdx).getImm() == MI2.getOperand(OpIdx).getImm();
+ return Root.getOperand(OpIdx).getImm() == Prev.getOperand(OpIdx).getImm();
};
auto checkRegOperand = [&](unsigned OpIdx) {
- return MI1.getOperand(OpIdx).getReg() == MI2.getOperand(OpIdx).getReg();
+ return Root.getOperand(OpIdx).getReg() == Prev.getOperand(OpIdx).getReg();
};
// PassThru
- // TODO: Potentially we can loosen the condition to consider Root (MI1) to be
- // associable with Prev (MI2) if Root has NoReg as passthru, in which case we
+ // TODO: Potentially we can loosen the condition to consider Root to be
+ // associable with Prev if Root has NoReg as passthru, in which case we
// would also need to loosen the condition on vector policies between them.
if (!checkRegOperand(1))
return false;
@@ -1705,9 +1705,9 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
// Mask
if (RISCVII::usesMaskPolicy(TSFlags)) {
- const MachineBasicBlock *MBB = MI1.getParent();
- const MachineBasicBlock::const_reverse_iterator It1(&MI1);
- const MachineBasicBlock::const_reverse_iterator It2(&MI2);
+ const MachineBasicBlock *MBB = Root.getParent();
+ const MachineBasicBlock::const_reverse_iterator It1(&Root);
+ const MachineBasicBlock::const_reverse_iterator It2(&Prev);
Register MI1VReg;
bool SeenMI2 = false;
@@ -1715,7 +1715,7 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
if (It == It2) {
SeenMI2 = true;
if (!MI1VReg.isValid())
- // There is no V0 def between MI1 and MI2; they're sharing the
+ // There is no V0 def between Root and Prev; they're sharing the
// same V0.
break;
}
@@ -1728,7 +1728,7 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
return false;
if (!MI1VReg.isValid()) {
- // This is the V0 def for MI1.
+ // This is the V0 def for Root.
MI1VReg = SrcReg;
continue;
}
@@ -1737,8 +1737,8 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
if (!SeenMI2)
continue;
- // This is the V0 def for MI2; check if it's the same as that of
- // MI1.
+ // This is the V0 def for Prev; check if it's the same as that of
+ // Root.
if (MI1VReg != SrcReg)
return false;
else
@@ -1746,9 +1746,9 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
}
}
- // If we haven't encountered MI2, it's likely that this function was
- // called incorrectly (e.g. MI1 is before MI2).
- assert(SeenMI2 && "MI2 is expected to appear before MI1");
+ // If we haven't encountered Prev, it's likely that this function was
+ // called incorrectly (e.g. Root is before Prev).
+ assert(SeenMI2 && "Prev is expected to appear before Root");
}
// Tail / Mask policies
@@ -1759,8 +1759,8 @@ bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &MI1,
// VL
if (RISCVII::hasVLOp(TSFlags)) {
unsigned OpIdx = RISCVII::getVLOpNum(Desc);
- const MachineOperand &Op1 = MI1.getOperand(OpIdx);
- const MachineOperand &Op2 = MI2.getOperand(OpIdx);
+ const MachineOperand &Op1 = Root.getOperand(OpIdx);
+ const MachineOperand &Op2 = Prev.getOperand(OpIdx);
if (Op1.getType() != Op2.getType())
return false;
switch (Op1.getType()) {