[llvm] 7e6e636 - Use llvm::has_single_bit<uint32_t> (NFC)
Kazu Hirata via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 15 23:28:01 PST 2023
I understand your concern. I wish the C++20 name were a little shorter and
more descriptive like std::is_pow2 or something. That said, once C++20 is
enabled, we might start seeing a mixture of isPowerOf2_32 and
std::has_single_bit in our code base. I don't think we want to end up
there. Plus, I'd like to make our code base look familiar to those who are
new to it.
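To make the comparison concrete, here is a minimal sketch of the two
spellings side by side (illustrative only, not part of the patch; header
names are from memory). Both predicates ask whether exactly one bit of a
uint32_t is set, and both return false for 0:

  #include "llvm/ADT/bit.h"            // llvm::has_single_bit
  #include "llvm/Support/MathExtras.h" // llvm::isPowerOf2_32
  #include <cstdint>

  // Old and new spellings of the same power-of-2 check; std::has_single_bit
  // from <bit> becomes a third option once C++20 is enabled.
  static_assert(llvm::isPowerOf2_32(64u) &&
                llvm::has_single_bit<uint32_t>(64u));
  static_assert(!llvm::isPowerOf2_32(0u) &&
                !llvm::has_single_bit<uint32_t>(0u));
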
I'm happy to take care of the divergence in the APInt interface --
countLeadingZeros, countPopulation, etc.
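For reference, the divergence I mean is between the APInt member spellings
and the free functions -- roughly as in the illustrative sketch below
(pairings as I recall them, not an exhaustive list):

  #include "llvm/ADT/APInt.h"
  #include "llvm/ADT/bit.h"

  void spellingExample() {
    llvm::APInt X(32, 8);
    (void)X.isPowerOf2();        // member spelling of the power-of-2 check
    (void)X.countLeadingZeros(); // member; free-function analogue: llvm::countl_zero
    (void)X.countPopulation();   // member; free-function analogue: llvm::popcount
  }
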
Kazu Hirata
On Wed, Feb 15, 2023 at 11:06 PM Craig Topper <craig.topper at gmail.com>
wrote:
> Maybe I'm alone here, but I find the C++20 names less readable than what
> we've been using in LLVM for years. Does anyone else feel this way? We're
> also diverging these helpers from what's in the APInt interface, which
> doesn't seem ideal either.
>
> ~Craig
>
>
> On Wed, Feb 15, 2023 at 10:17 PM Kazu Hirata via llvm-commits <
> llvm-commits at lists.llvm.org> wrote:
>
>>
>> Author: Kazu Hirata
>> Date: 2023-02-15T22:17:27-08:00
>> New Revision: 7e6e636fb683a854de27f56b2da7d157a0b70f4e
>>
>> URL:
>> https://github.com/llvm/llvm-project/commit/7e6e636fb683a854de27f56b2da7d157a0b70f4e
>> DIFF:
>> https://github.com/llvm/llvm-project/commit/7e6e636fb683a854de27f56b2da7d157a0b70f4e.diff
>>
>> LOG: Use llvm::has_single_bit<uint32_t> (NFC)
>>
>> This patch replaces isPowerOf2_32 with llvm::has_single_bit<uint32_t>
>> where the argument is wider than uint32_t.
>>
>> Added:
>>
>>
>> Modified:
>> clang/lib/CodeGen/CGNonTrivialStruct.cpp
>> llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
>> llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
>> llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
>> llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
>> llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
>> llvm/lib/CodeGen/TargetLoweringBase.cpp
>> llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
>> llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
>> llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
>> llvm/lib/Target/ARM/ARMISelLowering.cpp
>> llvm/lib/Target/AVR/AVRISelLowering.cpp
>> llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
>> llvm/lib/Target/PowerPC/PPCISelLowering.cpp
>> llvm/lib/Target/RISCV/RISCVISelLowering.cpp
>> llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
>> llvm/lib/Target/RISCV/RISCVSubtarget.cpp
>> llvm/lib/Target/X86/X86ISelLowering.cpp
>> llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
>> llvm/utils/TableGen/DAGISelMatcherGen.cpp
>>
>> Removed:
>>
>>
>>
>>
>> ################################################################################
>> diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
>> b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
>> index 7efb4d7efe5af..a10e51b8cb441 100644
>> --- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
>> +++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
>> @@ -522,7 +522,8 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
>> Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], this->Start);
>>
>> // Emit memcpy.
>> - if (Size.getQuantity() >= 16 || !llvm::isPowerOf2_32(Size.getQuantity())) {
>> + if (Size.getQuantity() >= 16 ||
>> + !llvm::has_single_bit<uint32_t>(Size.getQuantity())) {
>> llvm::Value *SizeVal =
>> llvm::ConstantInt::get(this->CGF->SizeTy, Size.getQuantity());
>> DstAddr =
>>
>> diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
>> b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
>> index 5fc267458a7af..81863c1edd6b2 100644
>> --- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
>> +++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
>> @@ -535,7 +535,7 @@ bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
>>
>> // For non power-of-2 types, they will very likely be legalized into multiple
>> // loads. Don't bother trying to match them into extending loads.
>> - if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
>> + if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))
>> return false;
>>
>> // Find the preferred type aside from the any-extends (unless it's the only
>>
>> diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
>> b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
>> index aefdf2079486f..fffbb862d390f 100644
>> --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
>> +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
>> @@ -1018,7 +1018,7 @@ void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
>>
>> LLT MaskTy = SwitchOpTy;
>> if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
>> - !isPowerOf2_32(MaskTy.getSizeInBits()))
>> + !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
>> MaskTy = LLT::scalar(PtrTy.getSizeInBits());
>> else {
>> // Ensure that the type will fit the mask value.
>>
>> diff --git a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
>> b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
>> index 54a82cac95d58..2c77ed8b06008 100644
>> --- a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
>> +++ b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
>> @@ -164,7 +164,8 @@ LegalityPredicate LegalityPredicates::sizeNotMultipleOf(unsigned TypeIdx,
>> LegalityPredicate LegalityPredicates::sizeNotPow2(unsigned TypeIdx) {
>> return [=](const LegalityQuery &Query) {
>> const LLT QueryTy = Query.Types[TypeIdx];
>> - return QueryTy.isScalar() && !isPowerOf2_32(QueryTy.getSizeInBits());
>> + return QueryTy.isScalar() &&
>> + !llvm::has_single_bit<uint32_t>(QueryTy.getSizeInBits());
>> };
>> }
>>
>> @@ -184,14 +185,16 @@ LegalityPredicate LegalityPredicates::sameSize(unsigned TypeIdx0,
>>
>> LegalityPredicate LegalityPredicates::memSizeInBytesNotPow2(unsigned MMOIdx) {
>> return [=](const LegalityQuery &Query) {
>> - return !isPowerOf2_32(Query.MMODescrs[MMOIdx].MemoryTy.getSizeInBytes());
>> + return !llvm::has_single_bit<uint32_t>(
>> + Query.MMODescrs[MMOIdx].MemoryTy.getSizeInBytes());
>> };
>> }
>>
>> LegalityPredicate LegalityPredicates::memSizeNotByteSizePow2(unsigned MMOIdx) {
>> return [=](const LegalityQuery &Query) {
>> const LLT MemTy = Query.MMODescrs[MMOIdx].MemoryTy;
>> - return !MemTy.isByteSized() || !isPowerOf2_32(MemTy.getSizeInBytes());
>> + return !MemTy.isByteSized() ||
>> + !llvm::has_single_bit<uint32_t>(MemTy.getSizeInBytes());
>> };
>> }
>>
>>
>> diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
>> b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
>> index a64026ffbfd45..2332b5b95cc13 100644
>> --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
>> +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
>> @@ -21526,9 +21526,10 @@ SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
>> // same source type and all of the inputs must be any or zero extend.
>> // Scalar sizes must be a power of two.
>> EVT OutScalarTy = VT.getScalarType();
>> - bool ValidTypes = SourceType != MVT::Other &&
>> - isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
>> - isPowerOf2_32(SourceType.getSizeInBits());
>> + bool ValidTypes =
>> + SourceType != MVT::Other &&
>> + llvm::has_single_bit<uint32_t>(OutScalarTy.getSizeInBits()) &&
>> + llvm::has_single_bit<uint32_t>(SourceType.getSizeInBits());
>>
>> // Create a new simpler BUILD_VECTOR sequence which other optimizations can
>> // turn into a single shuffle instruction.
>>
>> diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
>> b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
>> index ebfaf06bd8787..25d4223eafecf 100644
>> --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
>> +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
>> @@ -4272,7 +4272,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
>> // zero.
>> if (N0.getOpcode() == ISD::SRL && (C1.isZero() || C1.isOne()) &&
>> N0.getOperand(0).getOpcode() == ISD::CTLZ &&
>> - isPowerOf2_32(N0.getScalarValueSizeInBits())) {
>> + llvm::has_single_bit<uint32_t>(N0.getScalarValueSizeInBits())) {
>> if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) {
>> if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
>> ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) {
>>
>> diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp
>> b/llvm/lib/CodeGen/TargetLoweringBase.cpp
>> index de8d058311bae..1ff884047d86a 100644
>> --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
>> +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
>> @@ -1626,7 +1626,7 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
>> if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
>> TypeSize NewVTSize = NewVT.getSizeInBits();
>> // Convert sizes such as i33 to i64.
>> - if (!isPowerOf2_32(NewVTSize.getKnownMinValue()))
>> + if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue()))
>> NewVTSize = NewVTSize.coefficientNextPowerOf2();
>> return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
>> }
>>
>> diff --git
>> a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
>> b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
>> index d6f14c2e61311..b1b22d883dc49 100644
>> --- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
>> +++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
>> @@ -6164,7 +6164,7 @@ AArch64InstructionSelector::selectExtendedSHL(
>> // Since we're going to pull this into a shift, the constant value must be
>> // a power of 2. If we got a multiply, then we need to check this.
>> if (OffsetOpc == TargetOpcode::G_MUL) {
>> - if (!isPowerOf2_32(ImmVal))
>> + if (!llvm::has_single_bit<uint32_t>(ImmVal))
>> return std::nullopt;
>>
>> // Got a power of 2. So, the amount we'll shift is the log base-2 of that.
>>
>> diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
>> b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
>> index ab99c5cefdb9d..5feeff780d775 100644
>> --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
>> +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
>> @@ -981,8 +981,8 @@ bool AArch64LegalizerInfo::legalizeVectorTrunc(
>> Register SrcReg = MI.getOperand(1).getReg();
>> LLT DstTy = MRI.getType(DstReg);
>> LLT SrcTy = MRI.getType(SrcReg);
>> - assert(isPowerOf2_32(DstTy.getSizeInBits()) &&
>> - isPowerOf2_32(SrcTy.getSizeInBits()));
>> + assert(llvm::has_single_bit<uint32_t>(DstTy.getSizeInBits()) &&
>> + llvm::has_single_bit<uint32_t>(SrcTy.getSizeInBits()));
>>
>> // Split input type.
>> LLT SplitSrcTy =
>>
>> diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
>> b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
>> index a9459b20f8380..a97cb20911eba 100644
>> --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
>> +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
>> @@ -1580,7 +1580,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
>> const LLT &EltTy = Ty.getElementType();
>> if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 512)
>> return true;
>> - if (!isPowerOf2_32(EltTy.getSizeInBits()))
>> + if (!llvm::has_single_bit<uint32_t>(EltTy.getSizeInBits()))
>> return true;
>> }
>> return false;
>> @@ -1628,8 +1628,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
>> Builder.widenScalarIf(
>> [=](const LegalityQuery &Query) {
>> const LLT Ty = Query.Types[BigTyIdx];
>> - return !isPowerOf2_32(Ty.getSizeInBits()) &&
>> - Ty.getSizeInBits() % 16 != 0;
>> + return !llvm::has_single_bit<uint32_t>(Ty.getSizeInBits()) &&
>> + Ty.getSizeInBits() % 16 != 0;
>> },
>> [=](const LegalityQuery &Query) {
>> // Pick the next power of 2, or a multiple of 64 over 128.
>>
>> diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp
>> b/llvm/lib/Target/ARM/ARMISelLowering.cpp
>> index 6370ac39c264e..8a5e37a7adfd3 100644
>> --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
>> +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
>> @@ -14089,7 +14089,7 @@ static SDValue PerformMULCombine(SDNode *N,
>> MulAmt >>= ShiftAmt;
>>
>> if (MulAmt >= 0) {
>> - if (isPowerOf2_32(MulAmt - 1)) {
>> + if (llvm::has_single_bit<uint32_t>(MulAmt - 1)) {
>> // (mul x, 2^N + 1) => (add (shl x, N), x)
>> Res = DAG.getNode(ISD::ADD, DL, VT,
>> V,
>> @@ -14097,7 +14097,7 @@ static SDValue PerformMULCombine(SDNode *N,
>> V,
>> DAG.getConstant(Log2_32(MulAmt - 1), DL,
>> MVT::i32)));
>> - } else if (isPowerOf2_32(MulAmt + 1)) {
>> + } else if (llvm::has_single_bit<uint32_t>(MulAmt + 1)) {
>> // (mul x, 2^N - 1) => (sub (shl x, N), x)
>> Res = DAG.getNode(ISD::SUB, DL, VT,
>> DAG.getNode(ISD::SHL, DL, VT,
>> @@ -14109,7 +14109,7 @@ static SDValue PerformMULCombine(SDNode *N,
>> return SDValue();
>> } else {
>> uint64_t MulAmtAbs = -MulAmt;
>> - if (isPowerOf2_32(MulAmtAbs + 1)) {
>> + if (llvm::has_single_bit<uint32_t>(MulAmtAbs + 1)) {
>> // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
>> Res = DAG.getNode(ISD::SUB, DL, VT,
>> V,
>> @@ -14117,7 +14117,7 @@ static SDValue PerformMULCombine(SDNode *N,
>> V,
>> DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
>> MVT::i32)));
>> - } else if (isPowerOf2_32(MulAmtAbs - 1)) {
>> + } else if (llvm::has_single_bit<uint32_t>(MulAmtAbs - 1)) {
>> // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
>> Res = DAG.getNode(ISD::ADD, DL, VT,
>> V,
>>
>> diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp
>> b/llvm/lib/Target/AVR/AVRISelLowering.cpp
>> index 6c60d87c4704f..86935a202456e 100644
>> --- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
>> +++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
>> @@ -282,7 +282,7 @@ SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
>> const SDNode *N = Op.getNode();
>> EVT VT = Op.getValueType();
>> SDLoc dl(N);
>> - assert(isPowerOf2_32(VT.getSizeInBits()) &&
>> + assert(llvm::has_single_bit<uint32_t>(VT.getSizeInBits()) &&
>> "Expected power-of-2 shift amount");
>>
>> if (VT.getSizeInBits() == 32) {
>>
>> diff --git a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
>> b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
>> index 187563aafe39c..726aa28915e0c 100644
>> --- a/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
>> +++ b/llvm/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
>> @@ -441,8 +441,10 @@ struct PPCOperand : public MCParsedAsmOperand {
>>
>> bool isEvenRegNumber() const { return isRegNumber() && (getImm() & 1) == 0; }
>>
>> - bool isCRBitMask() const { return Kind == Immediate && isUInt<8>(getImm()) &&
>> - isPowerOf2_32(getImm()); }
>> + bool isCRBitMask() const {
>> + return Kind == Immediate && isUInt<8>(getImm()) &&
>> + llvm::has_single_bit<uint32_t>(getImm());
>> + }
>> bool isATBitsAsHint() const { return false; }
>> bool isMem() const override { return false; }
>> bool isReg() const override { return false; }
>>
>> diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
>> b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
>> index e0ec64a00ae1c..0dbb230214468 100644
>> --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
>> +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
>> @@ -7843,15 +7843,15 @@ SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
>> EVT EltVT = TrgVT.getVectorElementType();
>> if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
>> TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
>> - !isPowerOf2_32(EltVT.getSizeInBits()))
>> + !llvm::has_single_bit<uint32_t>(EltVT.getSizeInBits()))
>> return SDValue();
>>
>> SDValue N1 = Op.getOperand(0);
>> EVT SrcVT = N1.getValueType();
>> unsigned SrcSize = SrcVT.getSizeInBits();
>> - if (SrcSize > 256 ||
>> - !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
>> - !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
>> + if (SrcSize > 256 || !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
>> + !llvm::has_single_bit<uint32_t>(
>> + SrcVT.getVectorElementType().getSizeInBits()))
>> return SDValue();
>> if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
>> return SDValue();
>>
>> diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
>> b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
>> index 5967018c199bc..4430c97f1b346 100644
>> --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
>> +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
>> @@ -9784,7 +9784,7 @@ static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
>>
>> EVT VT = N->getValueType(0);
>> if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
>> - !isPowerOf2_32(VT.getSizeInBits()))
>> + !llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
>> return SDValue();
>>
>> SDLoc DL(N);
>>
>> diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
>> b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
>> index 411f2d63fc478..472492b6edf2f 100644
>> --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
>> +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
>> @@ -2441,7 +2441,7 @@ void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
>> BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), DestReg).setMIFlag(Flag);
>> assert(isInt<32>(NumOfVReg) &&
>> "Expect the number of vector registers within 32-bits.");
>> - if (isPowerOf2_32(NumOfVReg)) {
>> + if (llvm::has_single_bit<uint32_t>(NumOfVReg)) {
>> uint32_t ShiftAmount = Log2_32(NumOfVReg);
>> if (ShiftAmount == 0)
>> return;
>> @@ -2477,7 +2477,7 @@ void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
>> .addReg(DestReg, RegState::Kill)
>> .addReg(DestReg)
>> .setMIFlag(Flag);
>> - } else if (isPowerOf2_32(NumOfVReg - 1)) {
>> + } else if (llvm::has_single_bit<uint32_t>(NumOfVReg - 1)) {
>> Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
>> uint32_t ShiftAmount = Log2_32(NumOfVReg - 1);
>> BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
>> @@ -2488,7 +2488,7 @@ void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
>> .addReg(ScaledRegister, RegState::Kill)
>> .addReg(DestReg, RegState::Kill)
>> .setMIFlag(Flag);
>> - } else if (isPowerOf2_32(NumOfVReg + 1)) {
>> + } else if (llvm::has_single_bit<uint32_t>(NumOfVReg + 1)) {
>> Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
>> uint32_t ShiftAmount = Log2_32(NumOfVReg + 1);
>> BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
>>
>> diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
>> b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
>> index 1101d7eeeff8b..a6700fa3f4ea7 100644
>> --- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
>> +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
>> @@ -158,7 +158,8 @@ unsigned RISCVSubtarget::getMinRVVVectorSizeInBits() const {
>> unsigned RISCVSubtarget::getMaxLMULForFixedLengthVectors() const {
>> assert(hasVInstructions() &&
>> "Tried to get vector length without Zve or V extension
>> support!");
>> - assert(RVVVectorLMULMax <= 8 && isPowerOf2_32(RVVVectorLMULMax) &&
>> + assert(RVVVectorLMULMax <= 8 &&
>> + llvm::has_single_bit<uint32_t>(RVVVectorLMULMax) &&
>> "V extension requires a LMUL to be at most 8 and a power of
>> 2!");
>> return llvm::bit_floor(std::clamp<unsigned>(RVVVectorLMULMax, 1, 8));
>> }
>>
>> diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp
>> b/llvm/lib/Target/X86/X86ISelLowering.cpp
>> index 50f0ef63dc7ed..b3a1cbe6e7b65 100644
>> --- a/llvm/lib/Target/X86/X86ISelLowering.cpp
>> +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
>> @@ -24025,7 +24025,7 @@ static SDValue LowerVectorAllZero(const SDLoc &DL, SDValue V, ISD::CondCode CC,
>> }
>>
>> // Quit if not splittable to 128/256-bit vector.
>> - if (!isPowerOf2_32(VT.getSizeInBits()))
>> + if (!llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
>> return SDValue();
>>
>> // Split down to 128/256-bit vector.
>> @@ -24095,7 +24095,8 @@ static SDValue MatchVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
>> "Reduction source vector mismatch");
>>
>> // Quit if less than 128-bits or not splittable to 128/256-bit vector.
>> - if (VT.getSizeInBits() < 128 || !isPowerOf2_32(VT.getSizeInBits()))
>> + if (VT.getSizeInBits() < 128 ||
>> + !llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
>> return SDValue();
>>
>> // If more than one full vector is evaluated, OR them first before PTEST.
>> @@ -40361,9 +40362,10 @@ static SDValue combineX86ShufflesRecursively(
>> // This function can be performance-critical, so we rely on the power-of-2
>> // knowledge that we have about the mask sizes to replace div/rem ops with
>> // bit-masks and shifts.
>> - assert(isPowerOf2_32(RootMask.size()) &&
>> + assert(llvm::has_single_bit<uint32_t>(RootMask.size()) &&
>> + "Non-power-of-2 shuffle mask sizes");
>> + assert(llvm::has_single_bit<uint32_t>(OpMask.size()) &&
>> "Non-power-of-2 shuffle mask sizes");
>> - assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
>> unsigned RootMaskSizeLog2 = llvm::countr_zero(RootMask.size());
>> unsigned OpMaskSizeLog2 = llvm::countr_zero(OpMask.size());
>>
>>
>> diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
>> b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
>> index e10954681b956..f1aa79515963d 100644
>> --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
>> +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
>> @@ -5010,7 +5010,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
>> return
>> isa<UndefValue>(V) ||
>> !isConstant(V);
>> })) ||
>> - !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
>> + !llvm::has_single_bit<uint32_t>(NumUniqueScalarValues)) {
>> LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
>> newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
>> return false;
>>
>> diff --git a/llvm/utils/TableGen/DAGISelMatcherGen.cpp
>> b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
>> index 44bff4c67ab31..31a8d03175960 100644
>> --- a/llvm/utils/TableGen/DAGISelMatcherGen.cpp
>> +++ b/llvm/utils/TableGen/DAGISelMatcherGen.cpp
>> @@ -347,7 +347,8 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
>> N->getChild(1)->isLeaf() &&
>> N->getChild(1)->getPredicateCalls().empty() &&
>> N->getPredicateCalls().empty()) {
>> if (IntInit *II = dyn_cast<IntInit>(N->getChild(1)->getLeafValue())) {
>> - if (!isPowerOf2_32(II->getValue())) { // Don't bother with single bits.
>> + if (!llvm::has_single_bit<uint32_t>(
>> + II->getValue())) { // Don't bother with single bits.
>> // If this is at the root of the pattern, we emit a redundant
>> // CheckOpcode so that the following checks get factored properly under
>> // a single opcode check.
>>
>>
>>
>>
>