[llvm] [RISCV] Use Log2SEW=0 for VMNAND/VMSET created for riscv_vmsge(u) intrinsics. (PR #119767)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 13 00:24:38 PST 2024
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/119767
>From f3c9e15e98200ff2448a2f9ad949971bea03ba7e Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 12 Dec 2024 13:16:25 -0800
Subject: [PATCH] [RISCV] Use Log2SEW=0 for VMNAND/VMSET created for
riscv_vmsge(u) intrinsics.
These instructions should always be created with Log2SEW=0 and an LMUL
based on SEW=8. This is used by the vsetvli pass to know these
instructions only care about the ratio and not the specific value.
Not sure if I can construct a specific test that shows a vsetvli
difference because they were using the SEW and LMUL of the compare
instruction that must come before. Maybe if another instruction
got scheduled between you could see a vtype toggle.
Looks like I fixed the riscv_vmsge(u)_mask intrinsics years ago, but
forgot the unmasked versions.
---
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 56 ++++++++++++++-------
llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll | 2 +-
2 files changed, 38 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index c3922e38729dc3..884dfc1b30fe61 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1664,32 +1664,50 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
-#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
+#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
case RISCVII::VLMUL::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix \
: RISCV::PseudoVMSGT_VX_##suffix; \
+ break;
+ CASE_VMSLT_OPCODES(LMUL_F8, MF8)
+ CASE_VMSLT_OPCODES(LMUL_F4, MF4)
+ CASE_VMSLT_OPCODES(LMUL_F2, MF2)
+ CASE_VMSLT_OPCODES(LMUL_1, M1)
+ CASE_VMSLT_OPCODES(LMUL_2, M2)
+ CASE_VMSLT_OPCODES(LMUL_4, M4)
+ CASE_VMSLT_OPCODES(LMUL_8, M8)
+#undef CASE_VMSLT_OPCODES
+ }
+ // Mask operations use the LMUL from the mask type.
+ switch (RISCVTargetLowering::getLMUL(VT)) {
+ default:
+ llvm_unreachable("Unexpected LMUL!");
+#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
+ case RISCVII::VLMUL::lmulenum: \
VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
break;
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
- CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
-#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
+ CASE_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
+ CASE_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
+ CASE_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
+ CASE_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
+ CASE_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
+ CASE_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
+ CASE_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
+#undef CASE_VMNAND_VMSET_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
+ SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(3), VL);
// If vmsge(u) with minimum value, expand it to vmset.
if (IsCmpMinimum) {
- ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
+ ReplaceNode(Node,
+ CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, MaskSEW));
return;
}
@@ -1708,7 +1726,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
- {Cmp, Cmp, VL, SEW}));
+ {Cmp, Cmp, VL, MaskSEW}));
return;
}
case Intrinsic::riscv_vmsgeu_mask:
@@ -1742,7 +1760,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
-#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b) \
+#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
case RISCVII::VLMUL::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
@@ -1751,13 +1769,13 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
VMSGTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix##_MASK \
: RISCV::PseudoVMSGT_VX_##suffix##_MASK; \
break;
- CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
- CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
- CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
- CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
- CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
- CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
- CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
+ CASE_VMSLT_OPCODES(LMUL_F8, MF8)
+ CASE_VMSLT_OPCODES(LMUL_F4, MF4)
+ CASE_VMSLT_OPCODES(LMUL_F2, MF2)
+ CASE_VMSLT_OPCODES(LMUL_1, M1)
+ CASE_VMSLT_OPCODES(LMUL_2, M2)
+ CASE_VMSLT_OPCODES(LMUL_4, M4)
+ CASE_VMSLT_OPCODES(LMUL_8, M8)
#undef CASE_VMSLT_OPCODES
}
// Mask operations use the LMUL from the mask type.
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
index e42be4faafefcf..d3f57d58c7ab79 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -2183,7 +2183,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: ret
entry:
More information about the llvm-commits
mailing list