[llvm] A test PR for #140694 while waiting for #149110 to be accepted (PR #149824)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 21 07:35:04 PDT 2025
github-actions[bot] wrote:
<!--LLVM CODE FORMAT COMMENT: {clang-format}-->
:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:
<details>
<summary>
You can test this locally with the following command:
</summary>
``````````bash
git-clang-format --diff HEAD~1 HEAD --extensions h,cpp -- llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp llvm/lib/Target/AMDGPU/SIISelLowering.cpp llvm/lib/Target/AMDGPU/SIISelLowering.h
``````````
</details>
<details>
<summary>
View the diff from clang-format here.
</summary>
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 66df09dc8..e75f30e0f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4117,7 +4117,7 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
SDLoc SL(N);
SelectionDAG &DAG = DCI.DAG;
- if(SDValue SS = getShiftForReduction(ISD::SHL, LHS, RHS, DAG))
+ if (SDValue SS = getShiftForReduction(ISD::SHL, LHS, RHS, DAG))
return SS;
unsigned RHSVal;
@@ -4220,7 +4220,7 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);
- if(SDValue SS = getShiftForReduction(ISD::SRA, LHS, RHS, DAG))
+ if (SDValue SS = getShiftForReduction(ISD::SRA, LHS, RHS, DAG))
return SS;
if (VT.getScalarType() != MVT::i64)
@@ -4315,7 +4315,6 @@ SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
return DAG.getNode(ISD::BITCAST, SL, VT, Vec);
}
-
SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SDValue RHS = N->getOperand(1);
@@ -4326,7 +4325,7 @@ SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
SDLoc SL(N);
unsigned RHSVal;
- if(SDValue SS = getShiftForReduction(ISD::SRL, LHS, RHS, DAG))
+ if (SDValue SS = getShiftForReduction(ISD::SRL, LHS, RHS, DAG))
return SS;
if (CRHS) {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index e8ffccbab..f48e03901 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -13374,17 +13374,18 @@ SDValue SITargetLowering::performXorCombine(SDNode *N,
// replaced with source modifiers when the select is lowered to CNDMASK.
// TODO REMOVE: prevents regressions in fneg-modifier-casting.ll
unsigned Opc = LHS.getOpcode();
- if(((Opc == ISD::VSELECT && VT==MVT::v2i32) || (Opc == ISD::SELECT && VT==MVT::i64)) && CRHS && CRHS->getAPIntValue().isSignMask()) {
+ if (((Opc == ISD::VSELECT && VT == MVT::v2i32) ||
+ (Opc == ISD::SELECT && VT == MVT::i64)) &&
+ CRHS && CRHS->getAPIntValue().isSignMask()) {
SDValue CC = LHS->getOperand(0);
SDValue TRUE = LHS->getOperand(1);
SDValue FALSE = LHS->getOperand(2);
SDValue XTrue = DAG.getNode(ISD::XOR, SDLoc(N), VT, TRUE, RHS);
SDValue XFalse = DAG.getNode(ISD::XOR, SDLoc(N), VT, FALSE, RHS);
- SDValue XSelect = DAG.getNode(ISD::VSELECT, SDLoc(N), VT, CC, XTrue, XFalse);
+ SDValue XSelect =
+ DAG.getNode(ISD::VSELECT, SDLoc(N), VT, CC, XTrue, XFalse);
return XSelect;
}
-
-
// Make sure to apply the 64-bit constant splitting fold before trying to fold
// fneg-like xors into 64-bit select.
@@ -14390,125 +14391,126 @@ bool SITargetLowering::shouldExpandVectorDynExt(SDNode *N) const {
// return SDValue();
// }
- SDValue SITargetLowering::performExtractVectorEltCombine(
- SDNode * N, DAGCombinerInfo & DCI) const {
- SDValue Vec = N->getOperand(0);
- SelectionDAG &DAG = DCI.DAG;
-
- EVT VecVT = Vec.getValueType();
- EVT VecEltVT = VecVT.getVectorElementType();
- EVT ResVT = N->getValueType(0);
+SDValue
+SITargetLowering::performExtractVectorEltCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SDValue Vec = N->getOperand(0);
+ SelectionDAG &DAG = DCI.DAG;
- unsigned VecSize = VecVT.getSizeInBits();
- unsigned VecEltSize = VecEltVT.getSizeInBits();
+ EVT VecVT = Vec.getValueType();
+ EVT VecEltVT = VecVT.getVectorElementType();
+ EVT ResVT = N->getValueType(0);
- if ((Vec.getOpcode() == ISD::FNEG || Vec.getOpcode() == ISD::FABS) &&
- allUsesHaveSourceMods(N)) {
- SDLoc SL(N);
- SDValue Idx = N->getOperand(1);
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, ResVT,
- Vec.getOperand(0), Idx);
- return DAG.getNode(Vec.getOpcode(), SL, ResVT, Elt);
- }
-
- // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
- // =>
- // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
- // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
- // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
- if (Vec.hasOneUse() && DCI.isBeforeLegalize() && VecEltVT == ResVT) {
- SDLoc SL(N);
- SDValue Idx = N->getOperand(1);
- unsigned Opc = Vec.getOpcode();
+ unsigned VecSize = VecVT.getSizeInBits();
+ unsigned VecEltSize = VecEltVT.getSizeInBits();
- switch (Opc) {
- default:
- break;
- // TODO: Support other binary operations.
- case ISD::FADD:
- case ISD::FSUB:
- case ISD::FMUL:
- case ISD::ADD:
- case ISD::UMIN:
- case ISD::UMAX:
- case ISD::SMIN:
- case ISD::SMAX:
- case ISD::FMAXNUM:
- case ISD::FMINNUM:
- case ISD::FMAXNUM_IEEE:
- case ISD::FMINNUM_IEEE:
- case ISD::FMAXIMUM:
- case ISD::FMINIMUM: {
- SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, ResVT,
- Vec.getOperand(0), Idx);
- SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, ResVT,
- Vec.getOperand(1), Idx);
-
- DCI.AddToWorklist(Elt0.getNode());
- DCI.AddToWorklist(Elt1.getNode());
- return DAG.getNode(Opc, SL, ResVT, Elt0, Elt1, Vec->getFlags());
- }
- }
- }
+ if ((Vec.getOpcode() == ISD::FNEG || Vec.getOpcode() == ISD::FABS) &&
+ allUsesHaveSourceMods(N)) {
+ SDLoc SL(N);
+ SDValue Idx = N->getOperand(1);
+ SDValue Elt =
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, ResVT, Vec.getOperand(0), Idx);
+ return DAG.getNode(Vec.getOpcode(), SL, ResVT, Elt);
+ }
+
+ // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
+ // =>
+ // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
+ // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
+ // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
+ if (Vec.hasOneUse() && DCI.isBeforeLegalize() && VecEltVT == ResVT) {
+ SDLoc SL(N);
+ SDValue Idx = N->getOperand(1);
+ unsigned Opc = Vec.getOpcode();
- // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
- if (shouldExpandVectorDynExt(N)) {
- SDLoc SL(N);
- SDValue Idx = N->getOperand(1);
- SDValue V;
- for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
- SDValue IC = DAG.getVectorIdxConstant(I, SL);
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, ResVT, Vec, IC);
- if (I == 0)
- V = Elt;
- else
- V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
- }
- return V;
+ switch (Opc) {
+ default:
+ break;
+ // TODO: Support other binary operations.
+ case ISD::FADD:
+ case ISD::FSUB:
+ case ISD::FMUL:
+ case ISD::ADD:
+ case ISD::UMIN:
+ case ISD::UMAX:
+ case ISD::SMIN:
+ case ISD::SMAX:
+ case ISD::FMAXNUM:
+ case ISD::FMINNUM:
+ case ISD::FMAXNUM_IEEE:
+ case ISD::FMINNUM_IEEE:
+ case ISD::FMAXIMUM:
+ case ISD::FMINIMUM: {
+ SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, ResVT,
+ Vec.getOperand(0), Idx);
+ SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, ResVT,
+ Vec.getOperand(1), Idx);
+
+ DCI.AddToWorklist(Elt0.getNode());
+ DCI.AddToWorklist(Elt1.getNode());
+ return DAG.getNode(Opc, SL, ResVT, Elt0, Elt1, Vec->getFlags());
+ }
+ }
+ }
+
+ // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
+ if (shouldExpandVectorDynExt(N)) {
+ SDLoc SL(N);
+ SDValue Idx = N->getOperand(1);
+ SDValue V;
+ for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) {
+ SDValue IC = DAG.getVectorIdxConstant(I, SL);
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, ResVT, Vec, IC);
+ if (I == 0)
+ V = Elt;
+ else
+ V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ);
}
+ return V;
+ }
- if (!DCI.isBeforeLegalize())
- return SDValue();
-
- // Try to turn sub-dword accesses of vectors into accesses of the same
- // 32-bit elements. This exposes more load reduction opportunities by
- // replacing multiple small extract_vector_elements with a single 32-bit
- // extract.
- auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (isa<MemSDNode>(Vec) && VecEltSize <= 16 && VecEltVT.isByteSized() &&
- VecSize > 32 && VecSize % 32 == 0 && Idx) {
- EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
-
- unsigned BitIndex = Idx->getZExtValue() * VecEltSize;
- unsigned EltIdx = BitIndex / 32;
- unsigned LeftoverBitIdx = BitIndex % 32;
- SDLoc SL(N);
+ if (!DCI.isBeforeLegalize())
+ return SDValue();
- SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
- DCI.AddToWorklist(Cast.getNode());
+ // Try to turn sub-dword accesses of vectors into accesses of the same
+ // 32-bit elements. This exposes more load reduction opportunities by
+ // replacing multiple small extract_vector_elements with a single 32-bit
+ // extract.
+ auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (isa<MemSDNode>(Vec) && VecEltSize <= 16 && VecEltVT.isByteSized() &&
+ VecSize > 32 && VecSize % 32 == 0 && Idx) {
+ EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);
+
+ unsigned BitIndex = Idx->getZExtValue() * VecEltSize;
+ unsigned EltIdx = BitIndex / 32;
+ unsigned LeftoverBitIdx = BitIndex % 32;
+ SDLoc SL(N);
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
- DAG.getConstant(EltIdx, SL, MVT::i32));
- DCI.AddToWorklist(Elt.getNode());
- SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
- DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
- DCI.AddToWorklist(Srl.getNode());
+ SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
+ DCI.AddToWorklist(Cast.getNode());
- EVT VecEltAsIntVT = VecEltVT.changeTypeToInteger();
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VecEltAsIntVT, Srl);
- DCI.AddToWorklist(Trunc.getNode());
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
+ DAG.getConstant(EltIdx, SL, MVT::i32));
+ DCI.AddToWorklist(Elt.getNode());
+ SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
+ DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
+ DCI.AddToWorklist(Srl.getNode());
- if (VecEltVT == ResVT) {
- return DAG.getNode(ISD::BITCAST, SL, VecEltVT, Trunc);
- }
+ EVT VecEltAsIntVT = VecEltVT.changeTypeToInteger();
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VecEltAsIntVT, Srl);
+ DCI.AddToWorklist(Trunc.getNode());
- assert(ResVT.isScalarInteger());
- return DAG.getAnyExtOrTrunc(Trunc, SL, ResVT);
+ if (VecEltVT == ResVT) {
+ return DAG.getNode(ISD::BITCAST, SL, VecEltVT, Trunc);
}
- return SDValue();
+ assert(ResVT.isScalarInteger());
+ return DAG.getAnyExtOrTrunc(Trunc, SL, ResVT);
}
+ return SDValue();
+}
+
SDValue
SITargetLowering::performInsertVectorEltCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
``````````
</details>
https://github.com/llvm/llvm-project/pull/149824
More information about the llvm-commits mailing list