[llvm] [InstCombine][RISCV] Convert VPIntrinsics with splat operands to splats (PR #65706)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 12 03:57:49 PDT 2023
================
@@ -729,6 +730,96 @@ bool VectorCombine::foldBitcastShuf(Instruction &I) {
return true;
}
+/// VP Intrinsics whose vector operands are both splat values may be simplified
+/// into the scalar version of the operation and the result is splatted. This
+/// can lead to scalarization down the line.
+bool VectorCombine::scalarizeVPIntrinsic(VPIntrinsic &VPI) {
+ Value *Op0 = VPI.getArgOperand(0);
+ Value *Op1 = VPI.getArgOperand(1);
+
+ if (!getSplatValue(Op0) || !getSplatValue(Op1))
+ return false;
+
+ // For the binary VP intrinsics supported here, the result on disabled lanes
+ // is a poison value. For now, only do this simplification if all lanes
+ // are active.
+ // TODO: Relax the condition that all lanes are active by using insertelement
+ // on inactive lanes.
+ auto IsAllTrueMask = [](Value *MaskVal) {
+ if (Value *SplattedVal = getSplatValue(MaskVal))
+ if (auto *ConstValue = dyn_cast<Constant>(SplattedVal))
+ return ConstValue->isAllOnesValue();
+ return false;
+ };
+ if (!IsAllTrueMask(VPI.getArgOperand(2)))
+ return false;
+
+ // Check to make sure we support scalarization of the intrinsic
+ std::set<Intrinsic::ID> SupportedIntrinsics(
+ {Intrinsic::vp_add, Intrinsic::vp_sub, Intrinsic::vp_mul,
+ Intrinsic::vp_ashr, Intrinsic::vp_lshr, Intrinsic::vp_shl,
+ Intrinsic::vp_or, Intrinsic::vp_and, Intrinsic::vp_xor,
+ Intrinsic::vp_fadd, Intrinsic::vp_fsub, Intrinsic::vp_fmul,
+ Intrinsic::vp_sdiv, Intrinsic::vp_udiv, Intrinsic::vp_srem,
+ Intrinsic::vp_urem});
+ Intrinsic::ID IntrID = VPI.getIntrinsicID();
+ if (!SupportedIntrinsics.count(IntrID))
+ return false;
+
+ // Calculate cost of splatting both operands into vectors and the vector
+ // intrinsic
+ VectorType *VecTy = cast<VectorType>(VPI.getType());
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
+ InstructionCost SplatCost =
+ TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind, 0) +
+ TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy);
+
+ // Calculate the cost of the VP Intrinsic
+ SmallVector<Type *, 4> Args;
+ for (Value *V : VPI.args())
+ Args.push_back(V->getType());
+ IntrinsicCostAttributes Attrs(IntrID, VecTy, Args);
+ InstructionCost VectorOpCost = TTI.getIntrinsicInstrCost(Attrs, CostKind);
+ InstructionCost OldCost = 2 * SplatCost + VectorOpCost;
+
+ // Calculate cost of scalarizing
+ std::optional<unsigned> ScalarOpcodeOpt =
+ VPIntrinsic::getFunctionalOpcodeForVP(IntrID);
+ assert(ScalarOpcodeOpt && "Unable to determine scalar opcode");
+ unsigned ScalarOpcode = *ScalarOpcodeOpt;
+
+ InstructionCost ScalarOpCost =
+ TTI.getArithmeticInstrCost(ScalarOpcode, VecTy->getScalarType());
+ InstructionCost NewCost = ScalarOpCost + SplatCost;
----------------
lukel97 wrote:
The existing splats might still be kept around by other instructions — should we check whether they have any other users and incorporate that into the new cost?
https://github.com/llvm/llvm-project/pull/65706
More information about the llvm-commits
mailing list