[llvm] [AMDGPU] Rework dot4 signedness checks (PR #68757)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 6 16:56:17 PST 2023
github-actions[bot] wrote:
<!--LLVM CODE FORMAT COMMENT: {clang-format}-->
:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:
<details>
<summary>
You can test this locally with the following command:
</summary>
``````````bash
git-clang-format --diff 3c356eef31ab309466c198de3915037b068d8861 2dc29fd51a5c349d95f47d1c477319318b65b34b -- llvm/include/llvm/CodeGen/ByteProvider.h llvm/lib/Target/AMDGPU/SIISelLowering.cpp
``````````
</details>
<details>
<summary>
View the diff from clang-format here.
</summary>
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7a70014105be..ae2c5b6abd4d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -269,12 +269,12 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
// We only support LOAD/STORE and vector manipulation ops for vectors
// with > 4 elements.
for (MVT VT :
- {MVT::v8i32, MVT::v8f32, MVT::v9i32, MVT::v9f32, MVT::v10i32,
- MVT::v10f32, MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32,
- MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64, MVT::v4i16,
- MVT::v4f16, MVT::v3i64, MVT::v3f64, MVT::v6i32, MVT::v6f32,
- MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64, MVT::v8i16,
- MVT::v8f16, MVT::v16i16, MVT::v16f16, MVT::v16i64, MVT::v16f64,
+ {MVT::v8i32, MVT::v8f32, MVT::v9i32, MVT::v9f32, MVT::v10i32,
+ MVT::v10f32, MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32,
+ MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64, MVT::v4i16,
+ MVT::v4f16, MVT::v3i64, MVT::v3f64, MVT::v6i32, MVT::v6f32,
+ MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64, MVT::v8i16,
+ MVT::v8f16, MVT::v16i16, MVT::v16f16, MVT::v16i64, MVT::v16f64,
MVT::v32i32, MVT::v32f32}) {
for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
switch (Op) {
@@ -3596,7 +3596,7 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
// Add a register mask operand representing the call-preserved registers.
- auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
+ auto *TRI = static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
Ops.push_back(DAG.getRegisterMask(Mask));
@@ -3610,8 +3610,9 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
// actual call instruction.
if (IsTailCall) {
MFI.setHasTailCall();
- unsigned OPC = CallConv == CallingConv::AMDGPU_Gfx ?
- AMDGPUISD::TC_RETURN_GFX : AMDGPUISD::TC_RETURN;
+ unsigned OPC = CallConv == CallingConv::AMDGPU_Gfx
+ ? AMDGPUISD::TC_RETURN_GFX
+ : AMDGPUISD::TC_RETURN;
return DAG.getNode(OPC, DL, NodeTys, Ops);
}
@@ -4738,17 +4739,17 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
.addReg(SrcCond);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
- .addImm(0)
- .addReg(Src0, 0, AMDGPU::sub0)
- .addImm(0)
- .addReg(Src1, 0, AMDGPU::sub0)
- .addReg(SrcCondCopy);
+ .addImm(0)
+ .addReg(Src0, 0, AMDGPU::sub0)
+ .addImm(0)
+ .addReg(Src1, 0, AMDGPU::sub0)
+ .addReg(SrcCondCopy);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
- .addImm(0)
- .addReg(Src0, 0, AMDGPU::sub1)
- .addImm(0)
- .addReg(Src1, 0, AMDGPU::sub1)
- .addReg(SrcCondCopy);
+ .addImm(0)
+ .addReg(Src0, 0, AMDGPU::sub1)
+ .addImm(0)
+ .addReg(Src1, 0, AMDGPU::sub1)
+ .addReg(SrcCondCopy);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
.addReg(DstLo)
@@ -11029,7 +11030,6 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
return std::nullopt;
uint64_t NarrowByteWidth = NarrowBitWidth / 8;
-
if (Index >= NarrowByteWidth)
return Op.getOpcode() == ISD::ZERO_EXTEND
? std::optional<ByteProvider<SDValue>>(
@@ -14154,8 +14154,8 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
if (!MI.getDesc().operands().empty()) {
unsigned Opc = MI.getOpcode();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
- for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
- AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) {
+ for (auto I : {AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
+ AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1)}) {
if (I == -1)
break;
MachineOperand &Op = MI.getOperand(I);
``````````
</details>
https://github.com/llvm/llvm-project/pull/68757
More information about the llvm-commits mailing list