[llvm] [CodeGen] Fix EVT::changeVectorElementType assertion on simple-to-extended fallback (PR #173413)
Islam Imad via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 28 08:41:57 PST 2025
https://github.com/Islam-Imad updated https://github.com/llvm/llvm-project/pull/173413
From d960ef04ecdb02e5b3e89e90f7c37ea703d0657f Mon Sep 17 00:00:00 2001
From: Islam-Imad <islamimad404 at gmail.com>
Date: Tue, 23 Dec 2025 21:23:59 +0200
Subject: [PATCH 1/3] [CodeGen] Fix EVT::changeVectorElementType assertion on
 simple-to-extended fallback

Updated over 100 call sites across SelectionDAG and all target backends to
pass *DAG.getContext() or an already-available LLVMContext parameter.

Fixes #171608
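
For illustration, a minimal sketch of the failure mode (the concrete types
below are illustrative, not taken from the original report):

    LLVMContext &Ctx = *DAG.getContext();    // any available context
    EVT VecVT = MVT::v4i32;                  // simple vector VT
    EVT EltVT = EVT::getIntegerVT(Ctx, 65);  // i65 has no simple (MVT) form
    // Old API: VecVT.changeVectorElementType(EltVT) hit the assertion
    //   "Can't change simple vector VT to have extended element VT".
    // New API: the explicit context lets the call fall back to an extended
    // vector VT (here v4i65) via EVT::getVectorVT instead of asserting.
    EVT NewVT = VecVT.changeVectorElementType(Ctx, EltVT);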
---
llvm/include/llvm/CodeGen/ValueTypes.h | 17 +--
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2 +-
.../SelectionDAG/LegalizeIntegerTypes.cpp | 13 +-
.../SelectionDAG/LegalizeVectorOps.cpp | 2 +-
.../SelectionDAG/LegalizeVectorTypes.cpp | 11 +-
.../SelectionDAG/SelectionDAGBuilder.cpp | 15 +-
.../CodeGen/SelectionDAG/TargetLowering.cpp | 24 ++--
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 2 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 132 ++++++++++--------
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 9 +-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 12 +-
llvm/lib/Target/ARM/ARMISelLowering.cpp | 10 +-
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 11 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 8 +-
llvm/lib/Target/VE/VEISelLowering.cpp | 6 +-
.../WebAssembly/WebAssemblyISelLowering.cpp | 6 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 36 ++---
llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 8 +-
18 files changed, 186 insertions(+), 138 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/ValueTypes.h b/llvm/include/llvm/CodeGen/ValueTypes.h
index a91d14af0b204..92cad4884b8fe 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -99,20 +99,21 @@ namespace llvm {
/// Return a VT for a vector type whose attributes match ourselves
/// with the exception of the element type that is chosen by the caller.
- EVT changeVectorElementType(EVT EltVT) const {
- if (isSimple()) {
- assert(EltVT.isSimple() &&
- "Can't change simple vector VT to have extended element VT");
- return getSimpleVT().changeVectorElementType(EltVT.getSimpleVT());
+ EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const {
+ if (isSimple() and EltVT.isSimple()) {
+ MVT M = MVT::getVectorVT(EltVT.getSimpleVT(), getVectorElementCount());
+ if (M != MVT::INVALID_SIMPLE_VALUE_TYPE) {
+ return M;
+ }
}
- return changeExtendedVectorElementType(EltVT);
+ return getVectorVT(Context, EltVT, getVectorElementCount());
}
/// Return a VT for a type whose attributes match ourselves with the
/// exception of the element type that is chosen by the caller.
- EVT changeElementType(EVT EltVT) const {
+ EVT changeElementType(LLVMContext &Context, EVT EltVT) const {
EltVT = EltVT.getScalarType();
- return isVector() ? changeVectorElementType(EltVT) : EltVT;
+ return isVector() ? changeVectorElementType(Context, EltVT) : EltVT;
}
/// Return the type converted to an equivalently sized integer or vector
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index ff1e8af8b6c2e..3b5f35ae05054 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -23747,7 +23747,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
if (!IsByteSized) {
EltVT =
EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
- VT = VT.changeElementType(EltVT);
+ VT = VT.changeElementType(*DAG.getContext(), EltVT);
}
// Check if this operation will be handled the default way for its type.
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 67c4eccec962c..e677e6d0e42ef 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -5975,7 +5975,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
assert(PromEltVT.bitsLE(NOutVTElem) &&
"Promoted operand has an element type greater than result");
- EVT ExtVT = NOutVT.changeVectorElementType(PromEltVT);
+ EVT ExtVT = NOutVT.changeVectorElementType(*DAG.getContext(), PromEltVT);
SDValue Ext = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), ExtVT, Ops);
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT, Ext);
}
@@ -6146,16 +6146,19 @@ SDValue DAGTypeLegalizer::PromoteIntRes_CONCAT_VECTORS(SDNode *N) {
if (OpVT.getVectorElementType().getScalarSizeInBits() <
MaxElementVT.getScalarSizeInBits())
- Op = DAG.getAnyExtOrTrunc(Op, dl,
- OpVT.changeVectorElementType(MaxElementVT));
+ Op = DAG.getAnyExtOrTrunc(
+ Op, dl,
+ OpVT.changeVectorElementType(*DAG.getContext(), MaxElementVT));
Ops.push_back(Op);
}
// Do the CONCAT on the promoted type and finally truncate to (the promoted)
// NOutVT.
return DAG.getAnyExtOrTrunc(
- DAG.getNode(ISD::CONCAT_VECTORS, dl,
- OutVT.changeVectorElementType(MaxElementVT), Ops),
+ DAG.getNode(
+ ISD::CONCAT_VECTORS, dl,
+ OutVT.changeVectorElementType(*DAG.getContext(), MaxElementVT),
+ Ops),
dl, NOutVT);
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 22c5f7dffa80d..b1594838cae7f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1910,7 +1910,7 @@ void VectorLegalizer::ExpandUINT_TO_FLOAT(SDNode *Node,
SDValue UIToFP;
SDValue Result;
SDValue TargetZero = DAG.getIntPtrConstant(0, DL, /*isTarget=*/true);
- EVT FloatVecVT = SrcVT.changeVectorElementType(FPVT);
+ EVT FloatVecVT = SrcVT.changeVectorElementType(*DAG.getContext(), FPVT);
if (IsStrict) {
UIToFP = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {FloatVecVT, MVT::Other},
{Node->getOperand(0), Src});
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 26200a38a300c..af685191d82d8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2123,7 +2123,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
EVT EltVT = VecVT.getVectorElementType();
if (!EltVT.isByteSized()) {
EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
- VecVT = VecVT.changeElementType(EltVT);
+ VecVT = VecVT.changeElementType(*DAG.getContext(), EltVT);
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
// Extend the element type to match if needed.
if (EltVT.bitsGT(Elt.getValueType()))
@@ -4070,7 +4070,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
EVT EltVT = VecVT.getVectorElementType();
if (!EltVT.isByteSized()) {
EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
- VecVT = VecVT.changeElementType(EltVT);
+ VecVT = VecVT.changeElementType(*DAG.getContext(), EltVT);
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
SDValue NewExtract =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec, Idx);
@@ -4619,7 +4619,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_VSETCC(SDNode *N) {
GetSplitVector(N->getOperand(isStrict ? 2 : 1), Lo1, Hi1);
EVT VT = N->getValueType(0);
- EVT PartResVT = Lo0.getValueType().changeElementType(VT.getScalarType());
+ EVT PartResVT = Lo0.getValueType().changeElementType(*DAG.getContext(),
+ VT.getScalarType());
if (Opc == ISD::SETCC) {
LoRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Lo0, Lo1, N->getOperand(2));
@@ -5891,8 +5892,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_ExpOp(SDNode *N) {
EVT ExpVT = RHS.getValueType();
SDValue ExpOp = RHS;
if (ExpVT.isVector()) {
- EVT WideExpVT =
- WidenVT.changeVectorElementType(ExpVT.getVectorElementType());
+ EVT WideExpVT = WidenVT.changeVectorElementType(
+ *DAG.getContext(), ExpVT.getVectorElementType());
ExpOp = ModifyToType(RHS, WideExpVT);
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 9e342f9c4416f..ef80ae476e92d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -659,8 +659,9 @@ static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
"Cannot widen to illegal type");
- Val = DAG.getNode(ISD::BITCAST, DL,
- ValueVT.changeVectorElementType(MVT::f16), Val);
+ Val = DAG.getNode(
+ ISD::BITCAST, DL,
+ ValueVT.changeVectorElementType(*DAG.getContext(), MVT::f16), Val);
} else if (PartEVT != ValueEVT) {
return SDValue();
}
@@ -5056,7 +5057,7 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
EVT IdxVT = Index.getValueType();
EVT EltTy = IdxVT.getVectorElementType();
if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
- EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
+ EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
}
@@ -5155,7 +5156,7 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
EVT IdxVT = Index.getValueType();
EVT EltTy = IdxVT.getVectorElementType();
if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
- EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
+ EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
}
@@ -6539,7 +6540,7 @@ void SelectionDAGBuilder::visitVectorHistogram(const CallInst &I,
EVT IdxVT = Index.getValueType();
EVT EltTy = IdxVT.getVectorElementType();
if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
- EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
+ EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
}
@@ -8678,7 +8679,7 @@ void SelectionDAGBuilder::visitVPGather(
EVT IdxVT = Index.getValueType();
EVT EltTy = IdxVT.getVectorElementType();
if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
- EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
+ EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
}
LD = DAG.getGatherVP(
@@ -8744,7 +8745,7 @@ void SelectionDAGBuilder::visitVPScatter(
EVT IdxVT = Index.getValueType();
EVT EltTy = IdxVT.getVectorElementType();
if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
- EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
+ EVT NewIdxVT = IdxVT.changeVectorElementType(*DAG.getContext(), EltTy);
Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
}
ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index d5daba9f5690c..eb2183604a9fe 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8887,7 +8887,7 @@ SDValue TargetLowering::expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *Node,
SDValue IsZero = DAG.getSetCC(DL, CCVT, MinMax,
DAG.getConstantFP(0.0, DL, VT), ISD::SETEQ);
EVT IntVT = VT.changeTypeToInteger();
- EVT FloatVT = VT.changeElementType(MVT::f32);
+ EVT FloatVT = VT.changeElementType(*DAG.getContext(), MVT::f32);
SDValue LHSTrunc = LHS;
if (!isTypeLegal(IntVT) && !isOperationLegalOrCustom(ISD::IS_FPCLASS, VT)) {
LHSTrunc = DAG.getNode(ISD::FP_ROUND, DL, FloatVT, LHS,
@@ -9673,7 +9673,7 @@ SDValue TargetLowering::expandVectorFindLastActive(SDNode *N,
BoolVT.getTypeForEVT(*DAG.getContext()), MaskVT.getVectorElementCount(),
/*ZeroIsPoison=*/true, &VScaleRange);
EVT StepVT = MVT::getIntegerVT(EltWidth);
- EVT StepVecVT = MaskVT.changeVectorElementType(StepVT);
+ EVT StepVecVT = MaskVT.changeVectorElementType(*DAG.getContext(), StepVT);
// If promotion is required to make the type legal, do it here; promotion
// of integers within LegalizeVectorOps is looking for types of the same
@@ -10635,7 +10635,7 @@ TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
if (IsCompressedMemory) {
// Incrementing the pointer according to number of '1's in the mask.
if (DataVT.isScalableVector()) {
- EVT MaskExtVT = MaskVT.changeElementType(MVT::i32);
+ EVT MaskExtVT = MaskVT.changeElementType(*DAG.getContext(), MVT::i32);
SDValue MaskExt = DAG.getNode(ISD::ZERO_EXTEND, DL, MaskExtVT, Mask);
Increment = DAG.getNode(ISD::VECREDUCE_ADD, DL, MVT::i32, MaskExt);
} else {
@@ -11890,7 +11890,9 @@ SDValue TargetLowering::expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const {
// correct for this using a trick explained in: Boldo, Sylvie, and
// Guillaume Melquiond. "When double rounding is odd." 17th IMACS
// World Congress. 2005.
- EVT F32 = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
+ EVT F32 = VT.isVector()
+ ? VT.changeVectorElementType(*DAG.getContext(), MVT::f32)
+ : MVT::f32;
EVT I32 = F32.changeTypeToInteger();
Op = expandRoundInexactToOdd(F32, Op, dl, DAG);
Op = DAG.getNode(ISD::BITCAST, dl, I32, Op);
@@ -11917,7 +11919,9 @@ SDValue TargetLowering::expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const {
Op = DAG.getNode(ISD::SRL, dl, I32, Op,
DAG.getShiftAmountConstant(16, I32, dl));
Op = DAG.getNode(ISD::BITCAST, dl, I32, Op);
- EVT I16 = I32.isVector() ? I32.changeVectorElementType(MVT::i16) : MVT::i16;
+ EVT I16 = I32.isVector()
+ ? I32.changeVectorElementType(*DAG.getContext(), MVT::i16)
+ : MVT::i16;
Op = DAG.getNode(ISD::TRUNCATE, dl, I16, Op);
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
@@ -12041,10 +12045,12 @@ SDValue TargetLowering::expandVECTOR_COMPRESS(SDNode *Node,
// overwritten in the loop below.
EVT PopcountVT = ScalarVT.changeTypeToInteger();
SDValue Popcount = DAG.getNode(
- ISD::TRUNCATE, DL, MaskVT.changeVectorElementType(MVT::i1), Mask);
- Popcount =
- DAG.getNode(ISD::ZERO_EXTEND, DL,
- MaskVT.changeVectorElementType(PopcountVT), Popcount);
+ ISD::TRUNCATE, DL,
+ MaskVT.changeVectorElementType(*DAG.getContext(), MVT::i1), Mask);
+ Popcount = DAG.getNode(
+ ISD::ZERO_EXTEND, DL,
+ MaskVT.changeVectorElementType(*DAG.getContext(), PopcountVT),
+ Popcount);
Popcount = DAG.getNode(ISD::VECREDUCE_ADD, DL, PopcountVT, Popcount);
SDValue LastElmtPtr =
getVectorElementPointer(DAG, StackPtr, VecVT, Popcount);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index c9aed1a8d816a..92d9f24b93086 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -7461,7 +7461,7 @@ static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
else
llvm_unreachable("Unexpected MemSDNode!");
- return DataVT.changeVectorElementType(MemVT.getVectorElementType());
+ return DataVT.changeVectorElementType(Ctx, MemVT.getVectorElementType());
}
const unsigned Opcode = Root->getOpcode();
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index fa6ddab8eca0d..98ead7c54233e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4769,8 +4769,8 @@ SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
// Don't round if we had a NaN, we don't want to turn 0x7fffffff into
// 0x80000000.
if (NaN) {
- EVT I1 = I32.changeElementType(MVT::i1);
- EVT CondVT = VT.changeElementType(MVT::i1);
+ EVT I1 = I32.changeElementType(*DAG.getContext(), MVT::i1);
+ EVT CondVT = VT.changeElementType(*DAG.getContext(), MVT::i1);
SDValue IsNaN = DAG.getSetCC(DL, CondVT, SrcVal, SrcVal, ISD::SETUO);
IsNaN = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, I1, IsNaN);
Narrow = DAG.getSelect(DL, I32, IsNaN, NaN, Narrow);
@@ -4792,8 +4792,8 @@ SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
SDLoc DL(Op);
SDValue Narrow = SrcVal;
SDValue NaN;
- EVT I32 = SrcVT.changeElementType(MVT::i32);
- EVT F32 = SrcVT.changeElementType(MVT::f32);
+ EVT I32 = SrcVT.changeElementType(*DAG.getContext(), MVT::i32);
+ EVT F32 = SrcVT.changeElementType(*DAG.getContext(), MVT::f32);
if (SrcVT.getScalarType() == MVT::f32) {
bool NeverSNaN = DAG.isKnownNeverSNaN(Narrow);
Narrow = DAG.getNode(ISD::BITCAST, DL, I32, Narrow);
@@ -4831,7 +4831,7 @@ SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
Narrow = DAG.getNode(ISD::SRL, DL, I32, Narrow,
DAG.getShiftAmountConstant(16, I32, DL));
if (VT.isVector()) {
- EVT I16 = I32.changeVectorElementType(MVT::i16);
+ EVT I16 = I32.changeVectorElementType(*DAG.getContext(), MVT::i16);
Narrow = DAG.getNode(ISD::TRUNCATE, DL, I16, Narrow);
return DAG.getNode(ISD::BITCAST, DL, VT, Narrow);
}
@@ -4868,7 +4868,7 @@ SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op,
// f16 conversions are promoted to f32 when full fp16 is not supported.
if ((InVT.getVectorElementType() == MVT::f16 && !Subtarget->hasFullFP16()) ||
InVT.getVectorElementType() == MVT::bf16) {
- EVT NewVT = VT.changeElementType(MVT::f32);
+ EVT NewVT = VT.changeElementType(*DAG.getContext(), MVT::f32);
SDLoc DL(Op);
if (IsStrict) {
SDValue Ext = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {NewVT, MVT::Other},
@@ -5167,8 +5167,8 @@ SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
assert(VT.isVector() && "Expected vector type");
- EVT CastVT =
- VT.changeVectorElementType(Src.getValueType().getVectorElementType());
+ EVT CastVT = VT.changeVectorElementType(
+ *DAG.getContext(), Src.getValueType().getVectorElementType());
// Round the floating-point value into a floating-point register with the
// current rounding mode.
@@ -5205,7 +5205,7 @@ SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
// Promote bf16 conversions to f32.
if (VT.getVectorElementType() == MVT::bf16) {
- EVT F32 = VT.changeElementType(MVT::f32);
+ EVT F32 = VT.changeElementType(*DAG.getContext(), MVT::f32);
if (IsStrict) {
SDValue Val = DAG.getNode(Op.getOpcode(), DL, {F32, MVT::Other},
{Op.getOperand(0), In});
@@ -5460,7 +5460,8 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
return SDValue();
EVT EltVT = MVT::getIntegerVT(EltSizeInBytes * 8);
- EVT PredVT = getPackedSVEVectorVT(EltVT).changeElementType(MVT::i1);
+ EVT PredVT =
+ getPackedSVEVectorVT(EltVT).changeElementType(*DAG.getContext(), MVT::i1);
// Legal whilewr/rw (lowered by tablegen matcher).
if (PredVT == VT)
@@ -5474,7 +5475,8 @@ AArch64TargetLowering::LowerLOOP_DEPENDENCE_MASK(SDValue Op,
DAG.getNode(Op.getOpcode(), DL, PredVT, to_vector(Op->op_values()));
if (VT.isFixedLengthVector()) {
- EVT WidePredVT = PredVT.changeElementType(VT.getScalarType());
+ EVT WidePredVT =
+ PredVT.changeElementType(*DAG.getContext(), VT.getScalarType());
SDValue MaskAsInt = DAG.getNode(ISD::SIGN_EXTEND, DL, WidePredVT, Mask);
return convertFromScalableVector(DAG, VT, MaskAsInt);
}
@@ -6664,37 +6666,43 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
return DAG.getNode(
AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, DL, Op.getValueType(),
Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
+ DAG.getValueType(Op.getValueType().changeVectorElementType(
+ *DAG.getContext(), MVT::i8)),
Op.getOperand(1));
case Intrinsic::aarch64_sve_sxth:
return DAG.getNode(
AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, DL, Op.getValueType(),
Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
+ DAG.getValueType(Op.getValueType().changeVectorElementType(
+ *DAG.getContext(), MVT::i16)),
Op.getOperand(1));
case Intrinsic::aarch64_sve_sxtw:
return DAG.getNode(
AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, DL, Op.getValueType(),
Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
+ DAG.getValueType(Op.getValueType().changeVectorElementType(
+ *DAG.getContext(), MVT::i32)),
Op.getOperand(1));
case Intrinsic::aarch64_sve_uxtb:
return DAG.getNode(
AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, DL, Op.getValueType(),
Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i8)),
+ DAG.getValueType(Op.getValueType().changeVectorElementType(
+ *DAG.getContext(), MVT::i8)),
Op.getOperand(1));
case Intrinsic::aarch64_sve_uxth:
return DAG.getNode(
AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, DL, Op.getValueType(),
Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i16)),
+ DAG.getValueType(Op.getValueType().changeVectorElementType(
+ *DAG.getContext(), MVT::i16)),
Op.getOperand(1));
case Intrinsic::aarch64_sve_uxtw:
return DAG.getNode(
AArch64ISD::ZERO_EXTEND_INREG_MERGE_PASSTHRU, DL, Op.getValueType(),
Op.getOperand(2), Op.getOperand(3),
- DAG.getValueType(Op.getValueType().changeVectorElementType(MVT::i32)),
+ DAG.getValueType(Op.getValueType().changeVectorElementType(
+ *DAG.getContext(), MVT::i32)),
Op.getOperand(1));
case Intrinsic::localaddress: {
const auto &MF = DAG.getMachineFunction();
@@ -6998,11 +7006,11 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
MemVT = MemVT.changeVectorElementTypeToInteger();
// Find the smallest integer fixed length vector we can use for the gather.
- EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
+ EVT PromotedVT = VT.changeVectorElementType(*DAG.getContext(), MVT::i32);
if (DataVT.getVectorElementType() == MVT::i64 ||
Index.getValueType().getVectorElementType() == MVT::i64 ||
Mask.getValueType().getVectorElementType() == MVT::i64)
- PromotedVT = VT.changeVectorElementType(MVT::i64);
+ PromotedVT = VT.changeVectorElementType(*DAG.getContext(), MVT::i64);
// Promote vector operands except for passthrough, which we know is either
// undef or zero, and thus best constructed directly.
@@ -7017,7 +7025,8 @@ SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
// Convert fixed length vector operands to scalable.
- MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
+ MemVT = ContainerVT.changeVectorElementType(*DAG.getContext(),
+ MemVT.getVectorElementType());
Index = convertToScalableVector(DAG, ContainerVT, Index);
Mask = convertFixedMaskToScalableVector(Mask, DAG);
PassThru = PassThru->isUndef() ? DAG.getUNDEF(ContainerVT)
@@ -7089,11 +7098,11 @@ SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
}
// Find the smallest integer fixed length vector we can use for the scatter.
- EVT PromotedVT = VT.changeVectorElementType(MVT::i32);
+ EVT PromotedVT = VT.changeVectorElementType(*DAG.getContext(), MVT::i32);
if (VT.getVectorElementType() == MVT::i64 ||
Index.getValueType().getVectorElementType() == MVT::i64 ||
Mask.getValueType().getVectorElementType() == MVT::i64)
- PromotedVT = VT.changeVectorElementType(MVT::i64);
+ PromotedVT = VT.changeVectorElementType(*DAG.getContext(), MVT::i64);
// Promote vector operands.
unsigned ExtOpcode = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
@@ -7108,7 +7117,8 @@ SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
EVT ContainerVT = getContainerForFixedLengthVector(DAG, PromotedVT);
// Convert fixed length vector operands to scalable.
- MemVT = ContainerVT.changeVectorElementType(MemVT.getVectorElementType());
+ MemVT = ContainerVT.changeVectorElementType(*DAG.getContext(),
+ MemVT.getVectorElementType());
Index = convertToScalableVector(DAG, ContainerVT, Index);
Mask = convertFixedMaskToScalableVector(Mask, DAG);
StoreVal = convertToScalableVector(DAG, ContainerVT, StoreVal);
@@ -7582,7 +7592,7 @@ static SDValue LowerFLDEXP(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, XVT, DAG.getUNDEF(XVT), X, Zero);
SDValue VExp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ExpVT,
DAG.getUNDEF(ExpVT), Exp, Zero);
- SDValue VPg = DAG.getConstant(1, DL, XVT.changeVectorElementType(MVT::i1));
+ SDValue VPg = DAG.getConstant(1, DL, XVT.changeVectorElementType(*DAG.getContext(), MVT::i1));
SDValue FScale = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, XVT,
DAG.getTargetConstant(Intrinsic::aarch64_sve_fscale, DL, MVT::i64), VPg,
@@ -12289,7 +12299,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
SDLoc DL(Op);
// Create a predicate where all but the last -IdxVal elements are false.
- EVT PredVT = Ty.changeVectorElementType(MVT::i1);
+ EVT PredVT = Ty.changeVectorElementType(*DAG.getContext(), MVT::i1);
SDValue Pred = getPTrue(DAG, DL, PredVT, *PredPattern);
Pred = DAG.getNode(ISD::VECTOR_REVERSE, DL, PredVT, Pred);
@@ -16484,7 +16494,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
std::optional<unsigned> PredPattern =
getSVEPredPatternFromNumElements(InVT.getVectorNumElements());
- auto PredTy = VT.changeVectorElementType(MVT::i1);
+ auto PredTy = VT.changeVectorElementType(*DAG.getContext(), MVT::i1);
SDValue PTrue = getPTrue(DAG, DL, PredTy, *PredPattern);
SDValue ScalableVec1 = convertToScalableVector(DAG, VT, Vec1);
return DAG.getNode(ISD::VSELECT, DL, VT, PTrue, ScalableVec1, Vec0);
@@ -16938,7 +16948,8 @@ static SDValue getVectorBitwiseReduce(unsigned Opcode, SDValue Vec, EVT VT,
unsigned ExtendOp =
ScalarOpcode == ISD::XOR ? ISD::ANY_EXTEND : ISD::SIGN_EXTEND;
SDValue Extended = DAG.getNode(
- ExtendOp, DL, VecVT.changeVectorElementType(ExtendedVT), Vec);
+ ExtendOp, DL,
+ VecVT.changeVectorElementType(*DAG.getContext(), ExtendedVT), Vec);
// The uminp/uminv and umaxp/umaxv instructions don't have .2d variants, so
// in that case we bitcast the sign extended values from v2i64 to v4i32
// before reduction for optimal code generation.
@@ -19762,7 +19773,8 @@ static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
SDValue NBV;
SDLoc DL(BV);
if (BV.getOpcode() == ISD::BUILD_VECTOR) {
- EVT PreExtendVT = VT.changeVectorElementType(PreExtendType);
+ EVT PreExtendVT =
+ VT.changeVectorElementType(*DAG.getContext(), PreExtendType);
EVT PreExtendLegalType =
PreExtendType.getScalarSizeInBits() < 32 ? MVT::i32 : PreExtendType;
SmallVector<SDValue, 8> NewOps;
@@ -19772,7 +19784,8 @@ static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {
PreExtendLegalType));
NBV = DAG.getNode(ISD::BUILD_VECTOR, DL, PreExtendVT, NewOps);
} else { // BV.getOpcode() == ISD::VECTOR_SHUFFLE
- EVT PreExtendVT = VT.changeVectorElementType(PreExtendType.getScalarType());
+ EVT PreExtendVT = VT.changeVectorElementType(*DAG.getContext(),
+ PreExtendType.getScalarType());
NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0),
BV.getOperand(1).isUndef()
? DAG.getUNDEF(PreExtendVT)
@@ -20267,10 +20280,10 @@ tryToReplaceScalarFPConversionWithSVE(SDNode *N, SelectionDAG &DAG,
EVT DestVecTy;
if (DestTy.bitsGT(SrcTy)) {
DestVecTy = getPackedSVEVectorVT(DestTy);
- SrcVecTy = DestVecTy.changeVectorElementType(SrcTy);
+ SrcVecTy = DestVecTy.changeVectorElementType(*DAG.getContext(), SrcTy);
} else {
SrcVecTy = getPackedSVEVectorVT(SrcTy);
- DestVecTy = SrcVecTy.changeVectorElementType(DestTy);
+ DestVecTy = SrcVecTy.changeVectorElementType(*DAG.getContext(), DestTy);
}
// Ensure the resulting src/dest vector type is legal.
@@ -22006,9 +22019,10 @@ static SDValue performBuildVectorCombine(SDNode *N,
SDValue HalfToSingle =
DAG.getNode(ISD::FP_EXTEND, DL, MVT::v4f32, SrcVec);
SDValue SubvectorIdx = Elt0->getOperand(0)->getOperand(1);
- SDValue Extract = DAG.getNode(
- ISD::EXTRACT_SUBVECTOR, DL, VT.changeVectorElementType(MVT::f32),
- HalfToSingle, SubvectorIdx);
+ SDValue Extract =
+ DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
+ VT.changeVectorElementType(*DAG.getContext(), MVT::f32),
+ HalfToSingle, SubvectorIdx);
return DAG.getNode(ISD::FP_EXTEND, DL, VT, Extract);
}
}
@@ -22041,7 +22055,8 @@ static SDValue performBuildVectorCombine(SDNode *N,
// ResultType's known minimum vector length.
Elt0->getConstantOperandVal(1) % VT.getVectorMinNumElements() == 0) {
SDValue VecToExtend = Elt0->getOperand(0);
- EVT ExtVT = VecToExtend.getValueType().changeVectorElementType(MVT::i32);
+ EVT ExtVT = VecToExtend.getValueType().changeVectorElementType(
+ *DAG.getContext(), MVT::i32);
if (!DAG.getTargetLoweringInfo().isTypeLegal(ExtVT))
return SDValue();
@@ -24178,8 +24193,9 @@ static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
if (NumElts &&
NumElts * VT.getVectorElementType().getSizeInBits() <= MinSVESize) {
- Mask =
- getPTrue(DAG, DL, VT.changeVectorElementType(MVT::i1), PgPattern);
+ Mask = getPTrue(DAG, DL,
+ VT.changeVectorElementType(*DAG.getContext(), MVT::i1),
+ PgPattern);
SDValue PassThru = DAG.getConstant(0, DL, VT);
SDValue NewLoad = DAG.getMaskedLoad(
VT, DL, MLD->getChain(), MLD->getBasePtr(), MLD->getOffset(), Mask,
@@ -25440,8 +25456,9 @@ static SDValue performMSTORECombine(SDNode *N,
unsigned NumElts = getNumElementsFromSVEPredPattern(PgPattern);
if (NumElts && NumElts * InVT.getVectorElementType().getSizeInBits() <=
MinSVESize) {
- Mask = getPTrue(DAG, DL, InVT.changeVectorElementType(MVT::i1),
- PgPattern);
+ Mask = getPTrue(
+ DAG, DL, InVT.changeVectorElementType(*DAG.getContext(), MVT::i1),
+ PgPattern);
return DAG.getMaskedStore(MST->getChain(), DL, Value.getOperand(0),
MST->getBasePtr(), MST->getOffset(), Mask,
MST->getMemoryVT(), MST->getMemOperand(),
@@ -25541,7 +25558,8 @@ static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
if (DataVT.isFixedLengthVector() && DataVT.getScalarSizeInBits() == 64)
return Changed;
if (ISD::isVectorShrinkable(Index.getNode(), 32, N->isIndexSigned())) {
- EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
+ EVT NewIndexVT =
+ IndexVT.changeVectorElementType(*DAG.getContext(), MVT::i32);
Index = DAG.getNode(ISD::TRUNCATE, SDLoc(N), NewIndexVT, Index);
return true;
}
@@ -25582,7 +25600,7 @@ static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
LastElementOffset > std::numeric_limits<int32_t>::max())
return Changed;
- EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
+ EVT NewIndexVT = IndexVT.changeVectorElementType(*DAG.getContext(), MVT::i32);
// Stride does not scale explicitly by 'Scale', because it happens in
// the gather/scatter addressing mode.
Index = DAG.getStepVector(SDLoc(N), NewIndexVT, APInt(32, Stride, true));
@@ -26545,7 +26563,8 @@ static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) {
} else
return SDValue();
- return DAG.getNode(ISD::SETCC, DL, UseMVT.changeVectorElementType(MVT::i1),
+ return DAG.getNode(ISD::SETCC, DL,
+ UseMVT.changeVectorElementType(*DAG.getContext(), MVT::i1),
Op0ExtV, Op1ExtV, Op->getOperand(2));
}
@@ -30364,7 +30383,7 @@ static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
EVT VT) {
assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
"Expected legal scalable vector!");
- auto PredTy = VT.changeVectorElementType(MVT::i1);
+ auto PredTy = VT.changeVectorElementType(*DAG.getContext(), MVT::i1);
return DAG.getConstant(1, DL, PredTy);
}
@@ -30423,7 +30442,7 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
SDValue Result = NewLoad;
if (VT.isFloatingPoint() && Load->getExtensionType() == ISD::EXTLOAD) {
EVT ExtendVT = ContainerVT.changeVectorElementType(
- Load->getMemoryVT().getVectorElementType());
+ *DAG.getContext(), Load->getMemoryVT().getVectorElementType());
Result = getSVESafeBitCast(ExtendVT, Result, DAG);
Result = DAG.getNode(AArch64ISD::FP_EXTEND_MERGE_PASSTHRU, DL, ContainerVT,
@@ -30540,7 +30559,7 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
if (VT.isFloatingPoint() && Store->isTruncatingStore()) {
EVT TruncVT = ContainerVT.changeVectorElementType(
- Store->getMemoryVT().getVectorElementType());
+ *DAG.getContext(), Store->getMemoryVT().getVectorElementType());
MemVT = MemVT.changeTypeToInteger();
NewValue = DAG.getNode(AArch64ISD::FP_ROUND_MERGE_PASSTHRU, DL, TruncVT, Pg,
NewValue, DAG.getTargetConstant(0, DL, MVT::i64),
@@ -30807,7 +30826,8 @@ SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
if (const VTSDNode *VTNode = dyn_cast<VTSDNode>(V)) {
EVT VTArg = VTNode->getVT().getVectorElementType();
- EVT NewVTArg = ContainerVT.changeVectorElementType(VTArg);
+ EVT NewVTArg =
+ ContainerVT.changeVectorElementType(*DAG.getContext(), VTArg);
Operands.push_back(DAG.getValueType(NewVTArg));
continue;
}
@@ -31006,8 +31026,10 @@ AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
EVT MaskVT = Op.getOperand(0).getValueType();
EVT MaskContainerVT = getContainerForFixedLengthVector(DAG, MaskVT);
auto Mask = convertToScalableVector(DAG, MaskContainerVT, Op.getOperand(0));
- Mask = DAG.getNode(ISD::TRUNCATE, DL,
- MaskContainerVT.changeVectorElementType(MVT::i1), Mask);
+ Mask = DAG.getNode(
+ ISD::TRUNCATE, DL,
+ MaskContainerVT.changeVectorElementType(*DAG.getContext(), MVT::i1),
+ Mask);
auto ScalableRes = DAG.getNode(ISD::VSELECT, DL, ContainerVT,
Mask, Op1, Op2);
@@ -31110,7 +31132,7 @@ AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
EVT SrcVT = Val.getValueType();
EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
EVT ExtendVT = ContainerVT.changeVectorElementType(
- SrcVT.getVectorElementType());
+ *DAG.getContext(), SrcVT.getVectorElementType());
Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
Val = DAG.getNode(ISD::ANY_EXTEND, DL, VT.changeTypeToInteger(), Val);
@@ -31134,7 +31156,7 @@ AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
EVT SrcVT = Val.getValueType();
EVT ContainerSrcVT = getContainerForFixedLengthVector(DAG, SrcVT);
EVT RoundVT = ContainerSrcVT.changeVectorElementType(
- VT.getVectorElementType());
+ *DAG.getContext(), VT.getVectorElementType());
SDValue Pg = getPredicateForVector(DAG, DL, RoundVT);
Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
@@ -31178,7 +31200,7 @@ AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
return convertFromScalableVector(DAG, VT, Val);
} else {
EVT CvtVT = ContainerSrcVT.changeVectorElementType(
- ContainerDstVT.getVectorElementType());
+ *DAG.getContext(), ContainerDstVT.getVectorElementType());
SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, SrcVT);
Val = convertToScalableVector(DAG, ContainerSrcVT, Val);
@@ -31215,7 +31237,7 @@ AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
}
Intrinsic::ID IntID = Intrinsic::aarch64_sve_ld3_sret;
- EVT PredVT = PackedVT.changeVectorElementType(MVT::i1);
+ EVT PredVT = PackedVT.changeVectorElementType(*DAG.getContext(), MVT::i1);
SmallVector<SDValue, 7> Ops;
Ops.push_back(DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains));
@@ -31288,7 +31310,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
DAG.CreateStackTemporary(PackedVT.getStoreSize() * 3, Alignment);
Intrinsic::ID IntID = Intrinsic::aarch64_sve_st3;
- EVT PredVT = PackedVT.changeVectorElementType(MVT::i1);
+ EVT PredVT = PackedVT.changeVectorElementType(*DAG.getContext(), MVT::i1);
SmallVector<SDValue, 7> Ops;
Ops.push_back(DAG.getEntryNode());
@@ -31507,7 +31529,7 @@ AArch64TargetLowering::LowerGET_ACTIVE_LANE_MASK(SDValue Op,
SDLoc DL(Op);
EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
- EVT WhileVT = ContainerVT.changeElementType(MVT::i1);
+ EVT WhileVT = ContainerVT.changeElementType(*DAG.getContext(), MVT::i1);
SDValue Mask = DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, DL, WhileVT,
Op.getOperand(0), Op.getOperand(1));
@@ -31534,7 +31556,7 @@ AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
if (VT.bitsGT(SrcVT)) {
EVT CvtVT = ContainerDstVT.changeVectorElementType(
- ContainerSrcVT.getVectorElementType());
+ *DAG.getContext(), ContainerSrcVT.getVectorElementType());
SDValue Pg = getPredicateForFixedLengthVector(DAG, DL, VT);
Val = DAG.getNode(ISD::BITCAST, DL, SrcVT.changeTypeToInteger(), Val);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index a6000376a5963..e8e4f8f1ddc2b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4150,7 +4150,8 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
EVT ElementType = VT.getScalarType();
EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
- EVT TargetType = VT.isVector() ? VT.changeVectorElementType(TargetScalarType)
+ EVT TargetType = VT.isVector() ? VT.changeVectorElementType(*DAG.getContext(),
+ TargetScalarType)
: TargetScalarType;
if (Known.getMinValue().getZExtValue() < TargetScalarType.getSizeInBits())
@@ -4215,7 +4216,8 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
EVT ElementType = VT.getScalarType();
EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
- EVT TargetType = VT.isVector() ? VT.changeVectorElementType(TargetScalarType)
+ EVT TargetType = VT.isVector() ? VT.changeVectorElementType(*DAG.getContext(),
+ TargetScalarType)
: TargetScalarType;
if (Known.getMinValue().getZExtValue() < TargetScalarType.getSizeInBits())
@@ -4337,7 +4339,8 @@ SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
EVT ElementType = VT.getScalarType();
EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
- EVT TargetType = VT.isVector() ? VT.changeVectorElementType(TargetScalarType)
+ EVT TargetType = VT.isVector() ? VT.changeVectorElementType(*DAG.getContext(),
+ TargetScalarType)
: TargetScalarType;
if (Known.getMinValue().getZExtValue() < TargetScalarType.getSizeInBits())
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 102ca92856bae..3367c95fe8483 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7806,8 +7806,9 @@ SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
// Round-inexact-to-odd f64 to f32, then do the final rounding using the
// hardware f32 -> bf16 instruction.
- EVT F32VT = SrcVT.isVector() ? SrcVT.changeVectorElementType(MVT::f32) :
- MVT::f32;
+ EVT F32VT = SrcVT.isVector()
+ ? SrcVT.changeVectorElementType(*DAG.getContext(), MVT::f32)
+ : MVT::f32;
SDValue Rod = expandRoundInexactToOdd(F32VT, Src, DL, DAG);
return DAG.getNode(ISD::FP_ROUND, DL, DstVT, Rod,
DAG.getTargetConstant(0, DL, MVT::i32));
@@ -7954,14 +7955,13 @@ SDValue SITargetLowering::promoteUniformOpToI32(SDValue Op,
EVT OpTy = (Opc != ISD::SETCC) ? Op.getValueType()
: Op->getOperand(0).getValueType();
- auto ExtTy = OpTy.changeElementType(MVT::i32);
+ auto &DAG = DCI.DAG;
+ auto ExtTy = OpTy.changeElementType(*DAG.getContext(), MVT::i32);
if (DCI.isBeforeLegalizeOps() ||
isNarrowingProfitable(Op.getNode(), ExtTy, OpTy))
return SDValue();
- auto &DAG = DCI.DAG;
-
SDLoc DL(Op);
SDValue LHS;
SDValue RHS;
@@ -16542,7 +16542,7 @@ SDValue SITargetLowering::performFMulCombine(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
EVT VT = N->getValueType(0);
EVT ScalarVT = VT.getScalarType();
- EVT IntVT = VT.changeElementType(MVT::i32);
+ EVT IntVT = VT.changeElementType(*DAG.getContext(), MVT::i32);
if (!N->isDivergent() && getSubtarget()->hasSALUFloatInsts() &&
(ScalarVT == MVT::f32 || ScalarVT == MVT::f16)) {
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 9e949fbdaac2b..4fd845fbc07ac 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -16929,10 +16929,12 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
auto ExtendIfNeeded = [&](SDValue A, unsigned ExtendCode) {
EVT AVT = A.getValueType();
if (!AVT.is128BitVector())
- A = DAG.getNode(ExtendCode, dl,
- AVT.changeVectorElementType(MVT::getIntegerVT(
- 128 / AVT.getVectorMinNumElements())),
- A);
+ A = DAG.getNode(
+ ExtendCode, dl,
+ AVT.changeVectorElementType(
+ *DAG.getContext(),
+ MVT::getIntegerVT(128 / AVT.getVectorMinNumElements())),
+ A);
return A;
};
auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index cd0fb1b82ddfe..440d9b5a88b87 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -2487,8 +2487,9 @@ SDValue NVPTXTargetLowering::LowerFP_ROUND(SDValue Op,
// Round-inexact-to-odd f64 to f32, then do the final rounding using
// the hardware f32 -> bf16 instruction.
SDValue rod = TLI->expandRoundInexactToOdd(
- WideVT.isVector() ? WideVT.changeVectorElementType(MVT::f32)
- : MVT::f32,
+ WideVT.isVector()
+ ? WideVT.changeVectorElementType(*DAG.getContext(), MVT::f32)
+ : MVT::f32,
Wide, Loc, DAG);
return DAG.getFPExtendOrRound(rod, Loc, NarrowVT);
}
@@ -2514,8 +2515,10 @@ SDValue NVPTXTargetLowering::LowerFP_EXTEND(SDValue Op,
}
if (WideVT.getScalarType() == MVT::f64 &&
(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {
- EVT F32 = NarrowVT.isVector() ? NarrowVT.changeVectorElementType(MVT::f32)
- : MVT::f32;
+ EVT F32 =
+ NarrowVT.isVector()
+ ? NarrowVT.changeVectorElementType(*DAG.getContext(), MVT::f32)
+ : MVT::f32;
SDLoc Loc(Op);
if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {
Op = DAG.getNode(ISD::FP_EXTEND, Loc, F32, Narrow);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 23a24d184e508..86c820c075f0a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12561,7 +12561,8 @@ static SDValue widenVectorOpsToi8(SDValue N, const SDLoc &DL,
unsigned NumVals = N->getNumValues();
SDVTList VTs = DAG.getVTList(SmallVector<EVT, 4>(
- NumVals, N.getValueType().changeVectorElementType(MVT::i8)));
+ NumVals,
+ N.getValueType().changeVectorElementType(*DAG.getContext(), MVT::i8)));
SDValue WideN = DAG.getNode(N.getOpcode(), DL, VTs, WideOps);
SmallVector<SDValue, 4> TruncVals;
for (unsigned I = 0; I < NumVals; I++) {
@@ -17350,7 +17351,8 @@ static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &D
EVT ResultVT = EVT::getIntegerVT(C, ActiveBits).getRoundIntegerType(C);
if (ResultVT.bitsLT(VT.getVectorElementType())) {
N = DAG.getNode(ISD::TRUNCATE, DL,
- VT.changeVectorElementType(ResultVT), N);
+ VT.changeVectorElementType(*DAG.getContext(), ResultVT),
+ N);
return true;
}
}
@@ -17383,7 +17385,7 @@ static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &D
return false;
EVT NewEltVT = EVT::getIntegerVT(*DAG.getContext(), NewElen);
- EVT NewVT = SrcVT.changeVectorElementType(NewEltVT);
+ EVT NewVT = SrcVT.changeVectorElementType(*DAG.getContext(), NewEltVT);
SDValue NewExt = DAG.getNode(N0->getOpcode(), DL, NewVT, N0->ops());
SDValue NewShAmtVec = DAG.getConstant(ShAmtV, DL, NewVT);
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index a339592eb9886..74babefbe102e 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -915,10 +915,10 @@ VETargetLowering::VETargetLowering(const TargetMachine &TM,
computeRegisterProperties(Subtarget->getRegisterInfo());
}
-EVT VETargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
- EVT VT) const {
+EVT VETargetLowering::getSetCCResultType(const DataLayout &,
+ LLVMContext &Context, EVT VT) const {
if (VT.isVector())
- return VT.changeVectorElementType(MVT::i1);
+ return VT.changeVectorElementType(Context, MVT::i1);
return MVT::i32;
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index d8ae9cd6c39fa..6cebb77a93402 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -3224,7 +3224,8 @@ static SDValue performBitcastCombine(SDNode *N,
DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
{DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
DAG.getSExtOrTrunc(N->getOperand(0), DL,
- SrcVT.changeVectorElementType(Width))}),
+ SrcVT.changeVectorElementType(
+ *DAG.getContext(), Width))}),
DL, VT);
}
@@ -3426,8 +3427,9 @@ static SDValue performSETCCCombine(SDNode *N,
if (!cast<ConstantSDNode>(N->getOperand(1)))
return SDValue();
- EVT VecVT = FromVT.changeVectorElementType(MVT::getIntegerVT(128 / NumElts));
auto &DAG = DCI.DAG;
+ EVT VecVT = FromVT.changeVectorElementType(*DAG.getContext(),
+ MVT::getIntegerVT(128 / NumElts));
// setcc (iN (bitcast (vNi1 X))), 0, ne
// ==> any_true (vNi1 X)
if (auto Match = TryMatchTrue<0, ISD::SETNE, false, Intrinsic::wasm_anytrue>(
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 811ffb090d751..58b84650de92a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -23428,7 +23428,7 @@ static SDValue LowerVectorAllEqual(const SDLoc &DL, SDValue LHS, SDValue RHS,
VT = MVT::getVectorVT(SVT, VT.getSizeInBits() / SVT.getSizeInBits());
LHS = DAG.getBitcast(VT, MaskBits(LHS));
RHS = DAG.getBitcast(VT, MaskBits(RHS));
- EVT BoolVT = VT.changeVectorElementType(MVT::i1);
+ EVT BoolVT = VT.changeVectorElementType(*DAG.getContext(), MVT::i1);
SDValue V = DAG.getSetCC(DL, BoolVT, LHS, RHS, ISD::SETEQ);
V = DAG.getSExtOrTrunc(V, DL, VT);
while (VT.getSizeInBits() > TestSize) {
@@ -34424,7 +34424,9 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SDValue Res;
if (isSoftF16(SrcVT, Subtarget)) {
- EVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
+ EVT NVT = VT.isVector()
+ ? VT.changeVectorElementType(*DAG.getContext(), MVT::f32)
+ : MVT::f32;
if (IsStrict) {
Res =
DAG.getNode(Opc, dl, {VT, MVT::Other},
@@ -47269,7 +47271,7 @@ static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
Rdx = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Rdx,
DAG.getUNDEF(MVT::v8i16));
} else {
- EVT ByteVT = VecVT.changeVectorElementType(MVT::i8);
+ EVT ByteVT = VecVT.changeVectorElementType(*DAG.getContext(), MVT::i8);
Rdx = DAG.getNode(ISD::TRUNCATE, DL, ByteVT, Rdx);
if (ByteVT.getSizeInBits() < 128)
Rdx = WidenToV16I8(Rdx, true);
@@ -48207,7 +48209,7 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
if (SelectableLHS || SelectableRHS) {
EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
: RHS.getOperand(0).getValueType();
- EVT SrcCondVT = SrcVT.changeVectorElementType(MVT::i1);
+ EVT SrcCondVT = SrcVT.changeVectorElementType(*DAG.getContext(), MVT::i1);
LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
VT.getSizeInBits());
RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
@@ -49905,7 +49907,7 @@ static SDValue combineMulToPMADDWD(SDNode *N, const SDLoc &DL,
// Convert sext(vXi8) to zext(vXi16 sext(vXi8)) on pre-SSE41 targets
// which will expand the extension.
if (Src.getScalarValueSizeInBits() < 16 && !Subtarget.hasSSE41()) {
- EVT ExtVT = VT.changeVectorElementType(MVT::i16);
+ EVT ExtVT = VT.changeVectorElementType(*DAG.getContext(), MVT::i16);
Src = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, Src);
return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Src);
}
@@ -52172,7 +52174,7 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
// AND(X,SEXT(SETCC())) -> SELECT(SETCC(),X,0)
if (DCI.isAfterLegalizeDAG() && VT.isVector()) {
SDValue X, Y;
- EVT CondVT = VT.changeVectorElementType(MVT::i1);
+ EVT CondVT = VT.changeVectorElementType(*DAG.getContext(), MVT::i1);
if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(CondVT) &&
(VT.is512BitVector() || Subtarget.hasVLX()) &&
(VT.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
@@ -53214,7 +53216,7 @@ static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
// vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
if (SVT == MVT::i8 && InSVT == MVT::i32) {
- EVT MidVT = VT.changeVectorElementType(MVT::i16);
+ EVT MidVT = VT.changeVectorElementType(*DAG.getContext(), MVT::i16);
SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
DAG, Subtarget);
assert(Mid && "Failed to pack!");
@@ -53540,7 +53542,7 @@ reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
EVT CastVT = VT;
if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
EltVT = MVT::f64;
- CastVT = VT.changeVectorElementType(EltVT);
+ CastVT = VT.changeVectorElementType(*DAG.getContext(), EltVT);
}
SDValue Load =
@@ -53673,7 +53675,7 @@ static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
EVT EltVT = VT.getVectorElementType();
if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
EltVT = MVT::f64;
- EVT CastVT = VT.changeVectorElementType(EltVT);
+ EVT CastVT = VT.changeVectorElementType(*DAG.getContext(), EltVT);
Value = DAG.getBitcast(CastVT, Value);
}
SDValue Extract =
@@ -57417,7 +57419,7 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
// FIXME: We could check that the type is legal if we're after legalize
// types, but then we would need to construct test cases where that happens.
if (IndexWidth > 32 && DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
- EVT NewVT = IndexVT.changeVectorElementType(MVT::i32);
+ EVT NewVT = IndexVT.changeVectorElementType(*DAG.getContext(), MVT::i32);
// FIXME: We could support more than just constant fold, but we need to
// careful with costing. A truncate that can be optimized out would be
@@ -57495,7 +57497,7 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
// Make sure the index is either i32 or i64
if (IndexWidth != 32 && IndexWidth != 64) {
MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
- IndexVT = IndexVT.changeVectorElementType(EltVT);
+ IndexVT = IndexVT.changeVectorElementType(*DAG.getContext(), EltVT);
Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
}
@@ -57679,7 +57681,7 @@ static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
VT.getScalarType() != MVT::f16) {
SDLoc dl(N);
- EVT DstVT = InVT.changeVectorElementType(MVT::i32);
+ EVT DstVT = InVT.changeVectorElementType(*DAG.getContext(), MVT::i32);
SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
// UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
@@ -57750,7 +57752,7 @@ static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
VT.getScalarType() != MVT::f16) {
SDLoc dl(N);
- EVT DstVT = InVT.changeVectorElementType(MVT::i32);
+ EVT DstVT = InVT.changeVectorElementType(*DAG.getContext(), MVT::i32);
SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
if (IsStrict)
return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
@@ -57767,7 +57769,7 @@ static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
if (NumSignBits >= (BitWidth - 31)) {
EVT TruncVT = MVT::i32;
if (InVT.isVector())
- TruncVT = InVT.changeVectorElementType(TruncVT);
+ TruncVT = InVT.changeVectorElementType(*DAG.getContext(), TruncVT);
SDLoc dl(N);
if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
@@ -60983,7 +60985,7 @@ static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
? ISD::SEXTLOAD
: ISD::ZEXTLOAD;
- EVT MemVT = VT.changeVectorElementType(SVT);
+ EVT MemVT = VT.changeVectorElementType(*DAG.getContext(), SVT);
if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
SDValue Load = DAG.getExtLoad(
Ext, DL, VT, Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
@@ -61110,12 +61112,12 @@ static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
assert(!IsStrict && "Strict FP doesn't support BF16");
if (VT.getVectorElementType() == MVT::f64) {
- EVT TmpVT = VT.changeVectorElementType(MVT::f32);
+ EVT TmpVT = VT.changeVectorElementType(*DAG.getContext(), MVT::f32);
return DAG.getNode(ISD::FP_EXTEND, dl, VT,
DAG.getNode(ISD::FP_EXTEND, dl, TmpVT, Src));
}
assert(VT.getVectorElementType() == MVT::f32 && "Unexpected fpext");
- EVT NVT = SrcVT.changeVectorElementType(MVT::i32);
+ EVT NVT = SrcVT.changeVectorElementType(*DAG.getContext(), MVT::i32);
Src = DAG.getBitcast(SrcVT.changeTypeToInteger(), Src);
Src = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Src);
Src = DAG.getNode(ISD::SHL, dl, NVT, Src, DAG.getConstant(16, dl, NVT));
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index ae9d0a162011f..80299a639d3a3 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -127,7 +127,7 @@ MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
if (isTypeLegal(MVT::f16)) {
if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
return getRegisterTypeForCallingConv(
- Context, CC, VT.changeVectorElementType(MVT::f16));
+ Context, CC, VT.changeVectorElementType(Context, MVT::f16));
if (VT == MVT::bf16)
return MVT::f16;
@@ -166,8 +166,8 @@ unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
if (VT.isVector() && VT.getVectorElementType() == MVT::bf16 &&
isTypeLegal(MVT::f16))
- return getNumRegistersForCallingConv(Context, CC,
- VT.changeVectorElementType(MVT::f16));
+ return getNumRegistersForCallingConv(
+ Context, CC, VT.changeVectorElementType(Context, MVT::f16));
return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}
@@ -199,7 +199,7 @@ unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
// Split vNbf16 vectors according to vNf16.
if (VT.isVector() && VT.getVectorElementType() == MVT::bf16 &&
isTypeLegal(MVT::f16))
- VT = VT.changeVectorElementType(MVT::f16);
+ VT = VT.changeVectorElementType(Context, MVT::f16);
return TargetLowering::getVectorTypeBreakdownForCallingConv(Context, CC, VT, IntermediateVT,
NumIntermediates, RegisterVT);
From af1262b8628ef1b6927931f6f909b1f12ddaf5e5 Mon Sep 17 00:00:00 2001
From: Islam-Imad <islamimad404 at gmail.com>
Date: Wed, 24 Dec 2025 11:13:56 +0200
Subject: [PATCH 2/3] [SelectionDAG] Simplify EVT construction using
 changeElementType

This patch simplifies EVT construction across the codebase by using the
changeElementType() and changeVectorElementType() helpers instead of
manual conditional logic.
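
A representative before/after of the pattern being replaced, condensed from
the visitSUB hunk below (sketch only):

    // Before: build the scalar VT, then manually re-wrap it for vectors.
    EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), 1);
    if (VT.isVector())
      ExtVT = EVT::getVectorVT(*DAG.getContext(), ExtVT,
                               VT.getVectorElementCount());

    // After: one call computes the same VT for both scalar and vector cases.
    EVT ExtVT2 = VT.changeElementType(*DAG.getContext(),
                                      EVT::getIntegerVT(*DAG.getContext(), 1));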
---
llvm/include/llvm/CodeGen/ValueTypes.h | 5 +--
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 41 +++++++------------
.../SelectionDAG/LegalizeIntegerTypes.cpp | 6 +--
.../SelectionDAG/SelectionDAGBuilder.cpp | 5 +--
.../CodeGen/SelectionDAG/TargetLowering.cpp | 33 +++++----------
.../Target/AArch64/AArch64ISelLowering.cpp | 3 +-
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 12 ++----
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 4 +-
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 11 ++---
llvm/lib/Target/X86/X86ISelLowering.cpp | 4 +-
10 files changed, 41 insertions(+), 83 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/ValueTypes.h b/llvm/include/llvm/CodeGen/ValueTypes.h
index 92cad4884b8fe..3897cb8c18127 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -100,11 +100,10 @@ namespace llvm {
/// Return a VT for a vector type whose attributes match ourselves
/// with the exception of the element type that is chosen by the caller.
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const {
- if (isSimple() and EltVT.isSimple()) {
+ if (isSimple() && EltVT.isSimple()) {
MVT M = MVT::getVectorVT(EltVT.getSimpleVT(), getVectorElementCount());
- if (M != MVT::INVALID_SIMPLE_VALUE_TYPE) {
+ if (M != MVT::INVALID_SIMPLE_VALUE_TYPE)
return M;
- }
}
return getVectorVT(Context, EltVT, getVectorElementCount());
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 3b5f35ae05054..24053209f20a5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4261,10 +4261,8 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
// sub 0, (and x, 1) --> SIGN_EXTEND_INREG x, i1
if (N1.getOpcode() == ISD::AND && N1.hasOneUse() &&
isOneOrOneSplat(N1->getOperand(1))) {
- EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), 1);
- if (VT.isVector())
- ExtVT = EVT::getVectorVT(*DAG.getContext(), ExtVT,
- VT.getVectorElementCount());
+ EVT ExtVT = VT.changeElementType(*DAG.getContext(),
+ EVT::getIntegerVT(*DAG.getContext(), 1));
if (TLI.getOperationAction(ISD::SIGN_EXTEND_INREG, ExtVT) ==
TargetLowering::Legal) {
return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N1->getOperand(0),
@@ -6123,10 +6121,8 @@ static SDValue PerformMinMaxFpToSatCombine(SDValue N0, SDValue N1, SDValue N2,
if (!Fp || Fp.getOpcode() != ISD::FP_TO_SINT)
return SDValue();
EVT FPVT = Fp.getOperand(0).getValueType();
- EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), BW);
- if (FPVT.isVector())
- NewVT = EVT::getVectorVT(*DAG.getContext(), NewVT,
- FPVT.getVectorElementCount());
+ EVT NewVT = FPVT.changeElementType(*DAG.getContext(),
+ EVT::getIntegerVT(*DAG.getContext(), BW));
unsigned NewOpc = Unsigned ? ISD::FP_TO_UINT_SAT : ISD::FP_TO_SINT_SAT;
if (!DAG.getTargetLoweringInfo().shouldConvertFpToSat(NewOpc, FPVT, NewVT))
return SDValue();
@@ -6158,10 +6154,8 @@ static SDValue PerformUMinFpToSatCombine(SDValue N0, SDValue N1, SDValue N2,
unsigned BW = (C1 + 1).exactLogBase2();
EVT FPVT = N0.getOperand(0).getValueType();
- EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), BW);
- if (FPVT.isVector())
- NewVT = EVT::getVectorVT(*DAG.getContext(), NewVT,
- FPVT.getVectorElementCount());
+ EVT NewVT = FPVT.changeElementType(*DAG.getContext(),
+ EVT::getIntegerVT(*DAG.getContext(), BW));
if (!DAG.getTargetLoweringInfo().shouldConvertFpToSat(ISD::FP_TO_UINT_SAT,
FPVT, NewVT))
return SDValue();
@@ -11062,10 +11056,8 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
if (N01C) {
LLVMContext &Ctx = *DAG.getContext();
// Determine what the truncate's result bitsize and type would be.
- EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());
-
- if (VT.isVector())
- TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorElementCount());
+ EVT TruncVT = VT.changeElementType(
+ Ctx, EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue()));
// Determine the residual right-shift amount.
int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();
@@ -11107,9 +11099,8 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
// that is a free operation.
LLVMContext &Ctx = *DAG.getContext();
unsigned ShiftAmt = N1C->getZExtValue();
- EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - ShiftAmt);
- if (VT.isVector())
- TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorElementCount());
+ EVT TruncVT = VT.changeElementType(
+ Ctx, EVT::getIntegerVT(Ctx, OpSizeInBits - ShiftAmt));
// TODO: The simple type check probably belongs in the default hook
// implementation and/or target-specific overrides (because
@@ -16088,9 +16079,8 @@ static SDValue foldExtendVectorInregToExtendOfSubvector(
SDValue Src = N->getOperand(0);
EVT VT = N->getValueType(0);
- EVT SrcVT = EVT::getVectorVT(*DAG.getContext(),
- Src.getValueType().getVectorElementType(),
- VT.getVectorElementCount());
+ EVT SrcVT = VT.changeVectorElementType(
+ *DAG.getContext(), Src.getValueType().getVectorElementType());
assert(ISD::isExtVecInRegOpcode(InregOpcode) &&
"Expected EXTEND_VECTOR_INREG dag node in input!");
@@ -18310,10 +18300,9 @@ SDValue DAGCombiner::combineFMulOrFDivWithIntPow2(SDNode *N) {
// BuildLogBase2 may create a new node.
SDLoc DL(N);
// Get Log2 type with same bitwidth as the float type (VT).
- EVT NewIntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits());
- if (VT.isVector())
- NewIntVT = EVT::getVectorVT(*DAG.getContext(), NewIntVT,
- VT.getVectorElementCount());
+ EVT NewIntVT = VT.changeElementType(
+ *DAG.getContext(),
+ EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits()));
SDValue Log2 = BuildLogBase2(Pow2Op, DL, DAG.isKnownNeverZero(Pow2Op),
/*InexpensiveOnly*/ true, NewIntVT);
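All of the DAGCombiner hunks above collapse the same two-step pattern; as a
side-by-side sketch (Ctx, VT, and Bits are placeholder names, not from the
patch):

  #include "llvm/CodeGen/ValueTypes.h"
  using namespace llvm;

  // Before: build the scalar type, then re-wrap it by hand for vectors.
  static EVT intVTBefore(LLVMContext &Ctx, EVT VT, unsigned Bits) {
    EVT NewVT = EVT::getIntegerVT(Ctx, Bits);
    if (VT.isVector())
      NewVT = EVT::getVectorVT(Ctx, NewVT, VT.getVectorElementCount());
    return NewVT;
  }

  // After: one call. For vectors the element count is preserved; for
  // scalars the new element type is returned directly.
  static EVT intVTAfter(LLVMContext &Ctx, EVT VT, unsigned Bits) {
    return VT.changeElementType(Ctx, EVT::getIntegerVT(Ctx, Bits));
  }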
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index e677e6d0e42ef..8af80794a5d87 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1272,10 +1272,8 @@ static SDValue earlyExpandDIVFIX(SDNode *N, SDValue LHS, SDValue RHS,
SDLoc dl(N);
// Widen the types by a factor of two. This is guaranteed to expand, since it
// will always have enough high bits in the LHS to shift into.
- EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VTSize * 2);
- if (VT.isVector())
- WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
- VT.getVectorElementCount());
+ EVT WideVT = VT.changeElementType(
+ *DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), VTSize * 2));
LHS = DAG.getExtOrTrunc(Signed, LHS, dl, WideVT);
RHS = DAG.getExtOrTrunc(Signed, RHS, dl, WideVT);
SDValue Res = TLI.expandFixedPointDiv(N->getOpcode(), dl, LHS, RHS, Scale,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index ef80ae476e92d..7c0a64a59ed41 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7665,10 +7665,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
SDValue Op2 = getValue(I.getArgOperand(1));
EVT ResultVT = Op1.getValueType();
- EVT OverflowVT = MVT::i1;
- if (ResultVT.isVector())
- OverflowVT = EVT::getVectorVT(
- *Context, OverflowVT, ResultVT.getVectorElementCount());
+ EVT OverflowVT = ResultVT.changeElementType(*Context, MVT::i1);
SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
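The overflow type here leans on changeElementType returning the bare element
type for scalar inputs, so one call covers both the scalar and vector
intrinsic forms; a hypothetical check, not part of the patch:

  #include "llvm/CodeGen/ValueTypes.h"
  #include <cassert>
  using namespace llvm;

  void overflowVTs(LLVMContext &Ctx) {
    // ResultVT i32 -> OverflowVT i1; ResultVT v4i32 -> OverflowVT v4i1.
    assert(EVT(MVT::i32).changeElementType(Ctx, MVT::i1) == MVT::i1);
    EVT V4I32 = EVT::getVectorVT(Ctx, MVT::i32, 4);
    assert(V4I32.changeElementType(Ctx, MVT::i1) == MVT::v4i1);
  }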
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index eb2183604a9fe..e3300000fa6f4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2115,10 +2115,9 @@ bool TargetLowering::SimplifyDemandedBits(
if (std::optional<unsigned> InnerSA =
TLO.DAG.getValidShiftAmount(Op0, DemandedElts, Depth + 2)) {
unsigned LowBits = BitWidth - ShAmt;
- EVT ExtVT = EVT::getIntegerVT(*TLO.DAG.getContext(), LowBits);
- if (VT.isVector())
- ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtVT,
- VT.getVectorElementCount());
+ EVT ExtVT = VT.changeElementType(
+ *TLO.DAG.getContext(),
+ EVT::getIntegerVT(*TLO.DAG.getContext(), LowBits));
if (*InnerSA == ShAmt) {
if (!TLO.LegalOperations() ||
@@ -6646,10 +6645,8 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
}
// If type twice as wide legal, widen and use a mul plus a shift.
unsigned Size = VT.getScalarSizeInBits();
- EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), Size * 2);
- if (VT.isVector())
- WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
- VT.getVectorElementCount());
+ EVT WideVT = VT.changeElementType(
+ *DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), Size * 2));
// Some targets like AMDGPU try to go from SDIV to SDIVREM which is then
// custom lowered. This is very expensive so avoid it at all costs for
// constant divisors.
@@ -6851,10 +6848,8 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
}
// If type twice as wide legal, widen and use a mul plus a shift.
unsigned Size = VT.getScalarSizeInBits();
- EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), Size * 2);
- if (VT.isVector())
- WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
- VT.getVectorElementCount());
+ EVT WideVT = VT.changeElementType(
+ *DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), Size * 2));
// Some targets like AMDGPU try to go from UDIV to UDIVREM which is then
// custom lowered. This is very expensive so avoid it at all costs for
// constant divisors.
@@ -9089,10 +9084,8 @@ SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
// In the general case use integer operations.
unsigned BitSize = OperandVT.getScalarSizeInBits();
- EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), BitSize);
- if (OperandVT.isVector())
- IntVT = EVT::getVectorVT(*DAG.getContext(), IntVT,
- OperandVT.getVectorElementCount());
+ EVT IntVT = OperandVT.changeElementType(
+ *DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), BitSize));
SDValue OpAsInt = DAG.getBitcast(IntVT, Op);
// Various masks.
@@ -11890,9 +11883,7 @@ SDValue TargetLowering::expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const {
// correct for this using a trick explained in: Boldo, Sylvie, and
// Guillaume Melquiond. "When double rounding is odd." 17th IMACS
// World Congress. 2005.
- EVT F32 = VT.isVector()
- ? VT.changeVectorElementType(*DAG.getContext(), MVT::f32)
- : MVT::f32;
+ EVT F32 = VT.changeElementType(*DAG.getContext(), MVT::f32);
EVT I32 = F32.changeTypeToInteger();
Op = expandRoundInexactToOdd(F32, Op, dl, DAG);
Op = DAG.getNode(ISD::BITCAST, dl, I32, Op);
@@ -11919,9 +11910,7 @@ SDValue TargetLowering::expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const {
Op = DAG.getNode(ISD::SRL, dl, I32, Op,
DAG.getShiftAmountConstant(16, I32, dl));
Op = DAG.getNode(ISD::BITCAST, dl, I32, Op);
- EVT I16 = I32.isVector()
- ? I32.changeVectorElementType(*DAG.getContext(), MVT::i16)
- : MVT::i16;
+ EVT I16 = I32.changeElementType(*DAG.getContext(), MVT::i16);
Op = DAG.getNode(ISD::TRUNCATE, dl, I16, Op);
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
}
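For background on the FP_ROUND path above: bf16 reuses the upper 16 bits of
the f32 encoding, which is why the final step is a 16-bit logical shift
right followed by a truncate to i16. The same extraction on plain integers
(illustrative only, not code from the patch):

  #include <cstdint>

  // A bf16 bit pattern is the high half of the corresponding f32 bit
  // pattern; any rounding adjustment happens before this step.
  static uint16_t f32BitsToBF16Bits(uint32_t F32Bits) {
    return static_cast<uint16_t>(F32Bits >> 16);
  }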
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 98ead7c54233e..eefa4fcab91cd 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7592,7 +7592,8 @@ static SDValue LowerFLDEXP(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, XVT, DAG.getUNDEF(XVT), X, Zero);
SDValue VExp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ExpVT,
DAG.getUNDEF(ExpVT), Exp, Zero);
- SDValue VPg = DAG.getConstant(1, DL, XVT.changeVectorElementType(*DAG.getContext(), MVT::i1));
+ SDValue VPg = DAG.getConstant(
+ 1, DL, XVT.changeVectorElementType(*DAG.getContext(), MVT::i1));
SDValue FScale = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, XVT,
DAG.getTargetConstant(Intrinsic::aarch64_sve_fscale, DL, MVT::i64), VPg,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index e8e4f8f1ddc2b..ff4c952a32f7e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4150,9 +4150,7 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
EVT ElementType = VT.getScalarType();
EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
- EVT TargetType = VT.isVector() ? VT.changeVectorElementType(*DAG.getContext(),
- TargetScalarType)
- : TargetScalarType;
+ EVT TargetType = VT.changeElementType(*DAG.getContext(), TargetScalarType);
if (Known.getMinValue().getZExtValue() < TargetScalarType.getSizeInBits())
return SDValue();
@@ -4216,9 +4214,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
EVT ElementType = VT.getScalarType();
EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
- EVT TargetType = VT.isVector() ? VT.changeVectorElementType(*DAG.getContext(),
- TargetScalarType)
- : TargetScalarType;
+ EVT TargetType = VT.changeElementType(*DAG.getContext(), TargetScalarType);
if (Known.getMinValue().getZExtValue() < TargetScalarType.getSizeInBits())
return SDValue();
@@ -4339,9 +4335,7 @@ SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
EVT ElementType = VT.getScalarType();
EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
- EVT TargetType = VT.isVector() ? VT.changeVectorElementType(*DAG.getContext(),
- TargetScalarType)
- : TargetScalarType;
+ EVT TargetType = VT.changeElementType(*DAG.getContext(), TargetScalarType);
if (Known.getMinValue().getZExtValue() < TargetScalarType.getSizeInBits())
return SDValue();
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3367c95fe8483..31fbbb87e24be 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7806,9 +7806,7 @@ SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
// Round-inexact-to-odd f64 to f32, then do the final rounding using the
// hardware f32 -> bf16 instruction.
- EVT F32VT = SrcVT.isVector()
- ? SrcVT.changeVectorElementType(*DAG.getContext(), MVT::f32)
- : MVT::f32;
+ EVT F32VT = SrcVT.changeElementType(*DAG.getContext(), MVT::f32);
SDValue Rod = expandRoundInexactToOdd(F32VT, Src, DL, DAG);
return DAG.getNode(ISD::FP_ROUND, DL, DstVT, Rod,
DAG.getTargetConstant(0, DL, MVT::i32));
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 440d9b5a88b87..3ba1d6bf13eb8 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -2487,10 +2487,8 @@ SDValue NVPTXTargetLowering::LowerFP_ROUND(SDValue Op,
// Round-inexact-to-odd f64 to f32, then do the final rounding using
// the hardware f32 -> bf16 instruction.
SDValue rod = TLI->expandRoundInexactToOdd(
- WideVT.isVector()
- ? WideVT.changeVectorElementType(*DAG.getContext(), MVT::f32)
- : MVT::f32,
- Wide, Loc, DAG);
+ WideVT.changeElementType(*DAG.getContext(), MVT::f32), Wide, Loc,
+ DAG);
return DAG.getFPExtendOrRound(rod, Loc, NarrowVT);
}
}
@@ -2515,10 +2513,7 @@ SDValue NVPTXTargetLowering::LowerFP_EXTEND(SDValue Op,
}
if (WideVT.getScalarType() == MVT::f64 &&
(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {
- EVT F32 =
- NarrowVT.isVector()
- ? NarrowVT.changeVectorElementType(*DAG.getContext(), MVT::f32)
- : MVT::f32;
+ EVT F32 = NarrowVT.changeElementType(*DAG.getContext(), MVT::f32);
SDLoc Loc(Op);
if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {
Op = DAG.getNode(ISD::FP_EXTEND, Loc, F32, Narrow);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 58b84650de92a..aefe861a62a54 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -34424,9 +34424,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SDValue Res;
if (isSoftF16(SrcVT, Subtarget)) {
- EVT NVT = VT.isVector()
- ? VT.changeVectorElementType(*DAG.getContext(), MVT::f32)
- : MVT::f32;
+ EVT NVT = VT.changeElementType(*DAG.getContext(), MVT::f32);
if (IsStrict) {
Res =
DAG.getNode(Opc, dl, {VT, MVT::Other},
>From 86eedf7650451622e9c029b74d121c50fa992049 Mon Sep 17 00:00:00 2001
From: Islam-Imad <islamimad404 at gmail.com>
Date: Sun, 28 Dec 2025 18:41:34 +0200
Subject: [PATCH 3/3] Remove redundant EVT construction and context lookups
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 3 +--
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 3 +--
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 24053209f20a5..74d00317c3649 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4261,8 +4261,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
// sub 0, (and x, 1) --> SIGN_EXTEND_INREG x, i1
if (N1.getOpcode() == ISD::AND && N1.hasOneUse() &&
isOneOrOneSplat(N1->getOperand(1))) {
- EVT ExtVT = VT.changeElementType(*DAG.getContext(),
- EVT::getIntegerVT(*DAG.getContext(), 1));
+ EVT ExtVT = VT.changeElementType(*DAG.getContext(), MVT::i1);
if (TLI.getOperationAction(ISD::SIGN_EXTEND_INREG, ExtVT) ==
TargetLowering::Legal) {
return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N1->getOperand(0),
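The DAGCombiner hunk relies on the implicit MVT-to-EVT conversion, which is
what makes the extra getIntegerVT call redundant; equivalently (sketch, not
part of the patch):

  #include "llvm/CodeGen/ValueTypes.h"
  #include <cassert>
  using namespace llvm;

  static void sameType(LLVMContext &Ctx, EVT VT) {
    // getIntegerVT(Ctx, 1) yields the simple type MVT::i1, and MVT converts
    // implicitly to EVT, so both calls produce the same type.
    assert(VT.changeElementType(Ctx, EVT::getIntegerVT(Ctx, 1)) ==
           VT.changeElementType(Ctx, MVT::i1));
  }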
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d4e6da2e8b30e..c60f740d37576 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17352,8 +17352,7 @@ static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &D
EVT ResultVT = EVT::getIntegerVT(C, ActiveBits).getRoundIntegerType(C);
if (ResultVT.bitsLT(VT.getVectorElementType())) {
N = DAG.getNode(ISD::TRUNCATE, DL,
- VT.changeVectorElementType(*DAG.getContext(), ResultVT),
- N);
+ VT.changeVectorElementType(C, ResultVT), N);
return true;
}
}